agno 2.1.0__py3-none-any.whl → 2.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +13 -1
- agno/db/base.py +8 -4
- agno/db/dynamo/dynamo.py +69 -17
- agno/db/firestore/firestore.py +68 -29
- agno/db/gcs_json/gcs_json_db.py +68 -17
- agno/db/in_memory/in_memory_db.py +83 -14
- agno/db/json/json_db.py +79 -15
- agno/db/mongo/mongo.py +92 -74
- agno/db/mysql/mysql.py +17 -3
- agno/db/postgres/postgres.py +21 -3
- agno/db/redis/redis.py +38 -11
- agno/db/singlestore/singlestore.py +14 -3
- agno/db/sqlite/sqlite.py +34 -46
- agno/db/utils.py +50 -22
- agno/knowledge/knowledge.py +6 -0
- agno/knowledge/reader/field_labeled_csv_reader.py +294 -0
- agno/knowledge/reader/pdf_reader.py +28 -52
- agno/knowledge/reader/reader_factory.py +12 -0
- agno/memory/manager.py +12 -4
- agno/models/anthropic/claude.py +4 -1
- agno/models/aws/bedrock.py +52 -112
- agno/models/openai/responses.py +1 -1
- agno/os/app.py +24 -30
- agno/os/interfaces/__init__.py +1 -0
- agno/os/interfaces/a2a/__init__.py +3 -0
- agno/os/interfaces/a2a/a2a.py +42 -0
- agno/os/interfaces/a2a/router.py +252 -0
- agno/os/interfaces/a2a/utils.py +924 -0
- agno/os/interfaces/agui/agui.py +21 -5
- agno/os/interfaces/agui/router.py +12 -0
- agno/os/interfaces/base.py +4 -2
- agno/os/interfaces/slack/slack.py +13 -8
- agno/os/interfaces/whatsapp/whatsapp.py +12 -5
- agno/os/mcp.py +1 -1
- agno/os/router.py +39 -9
- agno/os/routers/memory/memory.py +5 -3
- agno/os/routers/memory/schemas.py +1 -0
- agno/os/utils.py +36 -10
- agno/run/base.py +2 -13
- agno/team/team.py +13 -1
- agno/tools/mcp.py +46 -1
- agno/utils/merge_dict.py +22 -1
- agno/utils/serialize.py +32 -0
- agno/utils/streamlit.py +1 -1
- agno/workflow/parallel.py +90 -14
- agno/workflow/step.py +30 -27
- agno/workflow/types.py +4 -6
- agno/workflow/workflow.py +5 -3
- {agno-2.1.0.dist-info → agno-2.1.2.dist-info}/METADATA +16 -14
- {agno-2.1.0.dist-info → agno-2.1.2.dist-info}/RECORD +53 -47
- {agno-2.1.0.dist-info → agno-2.1.2.dist-info}/WHEEL +0 -0
- {agno-2.1.0.dist-info → agno-2.1.2.dist-info}/licenses/LICENSE +0 -0
- {agno-2.1.0.dist-info → agno-2.1.2.dist-info}/top_level.txt +0 -0
agno/models/aws/bedrock.py
CHANGED
@@ -6,7 +6,7 @@ from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Tuple, Ty
 from pydantic import BaseModel
 
 from agno.exceptions import AgnoError, ModelProviderError
-from agno.models.base import
+from agno.models.base import Model
 from agno.models.message import Message
 from agno.models.metrics import Metrics
 from agno.models.response import ModelResponse
@@ -360,7 +360,7 @@ class AwsBedrock(Model):
         formatted_messages, system_message = self._format_messages(messages)
 
         tool_config = None
-        if tools
+        if tools:
             tool_config = {"tools": self._format_tools_for_request(tools)}
 
         body = {
@@ -408,7 +408,7 @@ class AwsBedrock(Model):
         formatted_messages, system_message = self._format_messages(messages)
 
         tool_config = None
-        if tools
+        if tools:
             tool_config = {"tools": self._format_tools_for_request(tools)}
 
         body = {
@@ -426,10 +426,14 @@ class AwsBedrock(Model):
 
         assistant_message.metrics.start_timer()
 
+        # Track current tool being built across chunks
+        current_tool: Dict[str, Any] = {}
+
         for chunk in self.get_client().converse_stream(modelId=self.id, messages=formatted_messages, **body)[
             "stream"
         ]:
-
+            model_response, current_tool = self._parse_provider_response_delta(chunk, current_tool)
+            yield model_response
 
         assistant_message.metrics.stop_timer()
 
@@ -456,7 +460,7 @@ class AwsBedrock(Model):
         formatted_messages, system_message = self._format_messages(messages)
 
         tool_config = None
-        if tools
+        if tools:
             tool_config = {"tools": self._format_tools_for_request(tools)}
 
         body = {
@@ -507,7 +511,7 @@ class AwsBedrock(Model):
         formatted_messages, system_message = self._format_messages(messages)
 
         tool_config = None
-        if tools
+        if tools:
             tool_config = {"tools": self._format_tools_for_request(tools)}
 
         body = {
@@ -525,10 +529,14 @@ class AwsBedrock(Model):
 
         assistant_message.metrics.start_timer()
 
+        # Track current tool being built across chunks
+        current_tool: Dict[str, Any] = {}
+
         async with self.get_async_client() as client:
             response = await client.converse_stream(modelId=self.id, messages=formatted_messages, **body)
             async for chunk in response["stream"]:
-
+                model_response, current_tool = self._parse_provider_response_delta(chunk, current_tool)
+                yield model_response
 
         assistant_message.metrics.stop_timer()
 
@@ -617,122 +625,54 @@ class AwsBedrock(Model):
 
         return model_response
 
-    def
-        self,
-
-        assistant_message: Message,
-        stream_data: MessageData,
-        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        run_response: Optional[RunOutput] = None,
-    ) -> Iterator[ModelResponse]:
-        """
-        Process the synchronous response stream.
-
-        Args:
-            messages (List[Message]): The messages to include in the request.
-            assistant_message (Message): The assistant message.
-            stream_data (MessageData): The stream data.
-        """
-        for response_delta in self.invoke_stream(
-            messages=messages,
-            assistant_message=assistant_message,
-            response_format=response_format,
-            tools=tools,
-            tool_choice=tool_choice,
-            run_response=run_response,
-        ):
-            should_yield = False
-
-            if response_delta.content:
-                stream_data.response_content += response_delta.content
-                should_yield = True
-
-            if response_delta.tool_calls:
-                if stream_data.response_tool_calls is None:
-                    stream_data.response_tool_calls = []
-                stream_data.response_tool_calls.extend(response_delta.tool_calls)
-                should_yield = True
-
-            if should_yield:
-                yield response_delta
-
-    async def aprocess_response_stream(
-        self,
-        messages: List[Message],
-        assistant_message: Message,
-        stream_data: MessageData,
-        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        run_response: Optional[RunOutput] = None,
-    ) -> AsyncIterator[ModelResponse]:
-        """
-        Process the asynchronous response stream.
-
-        Args:
-            messages (List[Message]): The messages to include in the request.
-            assistant_message (Message): The assistant message.
-            stream_data (MessageData): The stream data.
-        """
-        async for response_delta in self.ainvoke_stream(
-            messages=messages,
-            assistant_message=assistant_message,
-            response_format=response_format,
-            tools=tools,
-            tool_choice=tool_choice,
-            run_response=run_response,
-        ):
-            should_yield = False
-
-            if response_delta.content:
-                stream_data.response_content += response_delta.content
-                should_yield = True
-
-            if response_delta.tool_calls:
-                if stream_data.response_tool_calls is None:
-                    stream_data.response_tool_calls = []
-                stream_data.response_tool_calls.extend(response_delta.tool_calls)
-                should_yield = True
-
-            if should_yield:
-                yield response_delta
-
-        self._populate_assistant_message(assistant_message=assistant_message, provider_response=response_delta)
-
-    def _parse_provider_response_delta(self, response_delta: Dict[str, Any]) -> ModelResponse:  # type: ignore
+    def _parse_provider_response_delta(
+        self, response_delta: Dict[str, Any], current_tool: Dict[str, Any]
+    ) -> Tuple[ModelResponse, Dict[str, Any]]:
         """Parse the provider response delta for streaming.
 
         Args:
             response_delta: The streaming response delta from AWS Bedrock
+            current_tool: The current tool being built across chunks
 
         Returns:
-            ModelResponse: The parsed model response delta
+            Tuple[ModelResponse, Dict[str, Any]]: The parsed model response delta and updated current_tool
         """
         model_response = ModelResponse(role="assistant")
 
-        # Handle contentBlockDelta - text content
-        if "contentBlockDelta" in response_delta:
-            delta = response_delta["contentBlockDelta"]["delta"]
-            if "text" in delta:
-                model_response.content = delta["text"]
-
         # Handle contentBlockStart - tool use start
-
+        if "contentBlockStart" in response_delta:
             start = response_delta["contentBlockStart"]["start"]
             if "toolUse" in start:
-
-
-
-
-
-
-
-
-
-
-
+                # Start a new tool
+                tool_use_data = start["toolUse"]
+                current_tool = {
+                    "id": tool_use_data.get("toolUseId", ""),
+                    "type": "function",
+                    "function": {
+                        "name": tool_use_data.get("name", ""),
+                        "arguments": "",  # Will be filled in subsequent deltas
+                    },
+                }
+
+        # Handle contentBlockDelta - text content or tool input
+        elif "contentBlockDelta" in response_delta:
+            delta = response_delta["contentBlockDelta"]["delta"]
+            if "text" in delta:
+                model_response.content = delta["text"]
+            elif "toolUse" in delta and current_tool:
+                # Accumulate tool input
+                tool_input = delta["toolUse"].get("input", "")
+                if tool_input:
+                    current_tool["function"]["arguments"] += tool_input
+
+        # Handle contentBlockStop - tool use complete
+        elif "contentBlockStop" in response_delta and current_tool:
+            # Tool is complete, add it to model response
+            model_response.tool_calls = [current_tool]
+            # Track tool_id in extra for format_function_call_results
+            model_response.extra = {"tool_ids": [current_tool["id"]]}
+            # Reset current_tool for next tool
+            current_tool = {}
 
         # Handle metadata/usage information
         elif "metadata" in response_delta or "messageStop" in response_delta:
@@ -740,7 +680,7 @@ class AwsBedrock(Model):
             if "usage" in body:
                 model_response.response_usage = self._get_metrics(body["usage"])
 
-        return model_response
+        return model_response, current_tool
 
     def _get_metrics(self, response_usage: Dict[str, Any]) -> Metrics:
         """
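The substance of the bedrock.py change is that streamed tool calls are now assembled across multiple converse_stream events (contentBlockStart opens a tool, contentBlockDelta events append fragments of its JSON arguments, contentBlockStop finalizes it) instead of each delta being parsed in isolation. Below is a minimal standalone sketch of that accumulation pattern; the event payload values are made up for illustration, only the key structure mirrors what the new _parse_provider_response_delta reads.

# Simulated Bedrock converse_stream events (illustrative values, real key shapes)
chunks = [
    {"contentBlockStart": {"start": {"toolUse": {"toolUseId": "tool-1", "name": "get_weather"}}}},
    {"contentBlockDelta": {"delta": {"toolUse": {"input": '{"city": '}}}},
    {"contentBlockDelta": {"delta": {"toolUse": {"input": '"Paris"}'}}}},
    {"contentBlockStop": {}},
]

current_tool: dict = {}
tool_calls = []

for chunk in chunks:
    if "contentBlockStart" in chunk:
        # A new tool call begins: record id and name, arguments start empty
        tool_use = chunk["contentBlockStart"]["start"]["toolUse"]
        current_tool = {
            "id": tool_use.get("toolUseId", ""),
            "type": "function",
            "function": {"name": tool_use.get("name", ""), "arguments": ""},
        }
    elif "contentBlockDelta" in chunk and current_tool:
        delta = chunk["contentBlockDelta"]["delta"]
        if "toolUse" in delta:
            # Partial JSON arguments arrive across deltas and are concatenated
            current_tool["function"]["arguments"] += delta["toolUse"].get("input", "")
    elif "contentBlockStop" in chunk and current_tool:
        # The tool call is complete; emit it and reset the accumulator
        tool_calls.append(current_tool)
        current_tool = {}

print(tool_calls)
# [{'id': 'tool-1', 'type': 'function',
#   'function': {'name': 'get_weather', 'arguments': '{"city": "Paris"}'}}]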
agno/models/openai/responses.py
CHANGED
@@ -45,7 +45,7 @@ class OpenAIResponses(Model):
     parallel_tool_calls: Optional[bool] = None
     reasoning: Optional[Dict[str, Any]] = None
     verbosity: Optional[Literal["low", "medium", "high"]] = None
-    reasoning_effort: Optional[Literal["minimal", "medium", "high"]] = None
+    reasoning_effort: Optional[Literal["minimal", "low", "medium", "high"]] = None
     reasoning_summary: Optional[Literal["auto", "concise", "detailed"]] = None
     store: Optional[bool] = None
     temperature: Optional[float] = None
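The only change here is that "low" joins the accepted reasoning_effort values. A minimal usage sketch; the model id is illustrative, and keyword construction assumes the usual dataclass-style agno model constructors.

from agno.models.openai.responses import OpenAIResponses

# "low" is now accepted alongside "minimal", "medium", and "high"
model = OpenAIResponses(id="gpt-5-mini", reasoning_effort="low")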
agno/os/app.py
CHANGED
@@ -68,7 +68,6 @@ class AgentOS:
     def __init__(
         self,
         id: Optional[str] = None,
-        os_id: Optional[str] = None,  # Deprecated
         name: Optional[str] = None,
         description: Optional[str] = None,
         version: Optional[str] = None,
@@ -76,16 +75,18 @@ class AgentOS:
         teams: Optional[List[Team]] = None,
         workflows: Optional[List[Workflow]] = None,
         interfaces: Optional[List[BaseInterface]] = None,
+        a2a_interface: bool = False,
         config: Optional[Union[str, AgentOSConfig]] = None,
         settings: Optional[AgnoAPISettings] = None,
         lifespan: Optional[Any] = None,
-        enable_mcp: bool = False,  # Deprecated
         enable_mcp_server: bool = False,
-        fastapi_app: Optional[FastAPI] = None,  # Deprecated
         base_app: Optional[FastAPI] = None,
-        replace_routes: Optional[bool] = None,  # Deprecated
         on_route_conflict: Literal["preserve_agentos", "preserve_base_app", "error"] = "preserve_agentos",
         telemetry: bool = True,
+        os_id: Optional[str] = None,  # Deprecated
+        enable_mcp: bool = False,  # Deprecated
+        fastapi_app: Optional[FastAPI] = None,  # Deprecated
+        replace_routes: Optional[bool] = None,  # Deprecated
     ):
         """Initialize AgentOS.
 
@@ -98,6 +99,7 @@ class AgentOS:
             teams: List of teams to include in the OS
             workflows: List of workflows to include in the OS
             interfaces: List of interfaces to include in the OS
+            a2a_interface: Whether to expose the OS agents and teams in an A2A server
             config: Configuration file path or AgentOSConfig instance
             settings: API settings for the OS
             lifespan: Optional lifespan context manager for the FastAPI app
@@ -105,6 +107,7 @@ class AgentOS:
             base_app: Optional base FastAPI app to use for the AgentOS. All routes and middleware will be added to this app.
             on_route_conflict: What to do when a route conflict is detected in case a custom base_app is provided.
             telemetry: Whether to enable telemetry
+
         """
         if not agents and not workflows and not teams:
             raise ValueError("Either agents, teams or workflows must be provided.")
@@ -115,6 +118,7 @@ class AgentOS:
         self.workflows: Optional[List[Workflow]] = workflows
         self.teams: Optional[List[Team]] = teams
         self.interfaces = interfaces or []
+        self.a2a_interface = a2a_interface
 
         self.settings: AgnoAPISettings = settings or AgnoAPISettings()
 
@@ -263,10 +267,21 @@ class AgentOS:
         self._add_router(fastapi_app, get_health_router())
         self._add_router(fastapi_app, get_home_router(self))
 
+        has_a2a_interface = False
         for interface in self.interfaces:
+            if not has_a2a_interface and interface.__class__.__name__ == "A2A":
+                has_a2a_interface = True
             interface_router = interface.get_router()
             self._add_router(fastapi_app, interface_router)
 
+        # Add A2A interface if requested and not provided in self.interfaces
+        if self.a2a_interface and not has_a2a_interface:
+            from agno.os.interfaces.a2a import A2A
+
+            a2a_interface = A2A(agents=self.agents, teams=self.teams, workflows=self.workflows)
+            self.interfaces.append(a2a_interface)
+            self._add_router(fastapi_app, a2a_interface.get_router())
+
         self._auto_discover_databases()
         self._auto_discover_knowledge_instances()
 
@@ -400,18 +415,12 @@ class AgentOS:
                 self._register_db_with_validation(dbs, agent.db)
             if agent.knowledge and agent.knowledge.contents_db:
                 self._register_db_with_validation(knowledge_dbs, agent.knowledge.contents_db)
-                # Also add to general dbs if it's used for both purposes
-                if agent.knowledge.contents_db.id not in dbs:
-                    self._register_db_with_validation(dbs, agent.knowledge.contents_db)
 
         for team in self.teams or []:
             if team.db:
                 self._register_db_with_validation(dbs, team.db)
             if team.knowledge and team.knowledge.contents_db:
                 self._register_db_with_validation(knowledge_dbs, team.knowledge.contents_db)
-                # Also add to general dbs if it's used for both purposes
-                if team.knowledge.contents_db.id not in dbs:
-                    self._register_db_with_validation(dbs, team.knowledge.contents_db)
 
         for workflow in self.workflows or []:
             if workflow.db:
@@ -488,7 +497,6 @@ class AgentOS:
         if session_config.dbs is None:
             session_config.dbs = []
 
-        multiple_dbs: bool = len(self.dbs.keys()) > 1
         dbs_with_specific_config = [db.db_id for db in session_config.dbs]
 
         for db_id in self.dbs.keys():
@@ -496,9 +504,7 @@ class AgentOS:
                 session_config.dbs.append(
                     DatabaseConfig(
                         db_id=db_id,
-                        domain_config=SessionDomainConfig(
-                            display_name="Sessions" if not multiple_dbs else "Sessions in database '" + db_id + "'"
-                        ),
+                        domain_config=SessionDomainConfig(display_name=db_id),
                     )
                 )
 
@@ -510,7 +516,6 @@ class AgentOS:
         if memory_config.dbs is None:
             memory_config.dbs = []
 
-        multiple_dbs: bool = len(self.dbs.keys()) > 1
         dbs_with_specific_config = [db.db_id for db in memory_config.dbs]
 
         for db_id in self.dbs.keys():
@@ -518,9 +523,7 @@ class AgentOS:
                 memory_config.dbs.append(
                     DatabaseConfig(
                         db_id=db_id,
-                        domain_config=MemoryDomainConfig(
-                            display_name="Memory" if not multiple_dbs else "Memory in database '" + db_id + "'"
-                        ),
+                        domain_config=MemoryDomainConfig(display_name=db_id),
                     )
                 )
 
@@ -532,7 +535,6 @@ class AgentOS:
         if knowledge_config.dbs is None:
             knowledge_config.dbs = []
 
-        multiple_knowledge_dbs: bool = len(self.knowledge_dbs.keys()) > 1
         dbs_with_specific_config = [db.db_id for db in knowledge_config.dbs]
 
         # Only add databases that are actually used for knowledge contents
@@ -541,9 +543,7 @@ class AgentOS:
                 knowledge_config.dbs.append(
                     DatabaseConfig(
                         db_id=db_id,
-                        domain_config=KnowledgeDomainConfig(
-                            display_name="Knowledge" if not multiple_knowledge_dbs else "Knowledge in database " + db_id
-                        ),
+                        domain_config=KnowledgeDomainConfig(display_name=db_id),
                     )
                 )
 
@@ -555,7 +555,6 @@ class AgentOS:
         if metrics_config.dbs is None:
             metrics_config.dbs = []
 
-        multiple_dbs: bool = len(self.dbs.keys()) > 1
         dbs_with_specific_config = [db.db_id for db in metrics_config.dbs]
 
         for db_id in self.dbs.keys():
@@ -563,9 +562,7 @@ class AgentOS:
                 metrics_config.dbs.append(
                     DatabaseConfig(
                         db_id=db_id,
-                        domain_config=MetricsDomainConfig(
-                            display_name="Metrics" if not multiple_dbs else "Metrics in database '" + db_id + "'"
-                        ),
+                        domain_config=MetricsDomainConfig(display_name=db_id),
                     )
                 )
 
@@ -577,7 +574,6 @@ class AgentOS:
        if evals_config.dbs is None:
            evals_config.dbs = []
 
-        multiple_dbs: bool = len(self.dbs.keys()) > 1
        dbs_with_specific_config = [db.db_id for db in evals_config.dbs]
 
        for db_id in self.dbs.keys():
@@ -585,9 +581,7 @@ class AgentOS:
                evals_config.dbs.append(
                    DatabaseConfig(
                        db_id=db_id,
-                        domain_config=EvalsDomainConfig(
-                            display_name="Evals" if not multiple_dbs else "Evals in database '" + db_id + "'"
-                        ),
+                        domain_config=EvalsDomainConfig(display_name=db_id),
                    )
                )
 
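Taken together, the app.py changes add a one-flag path to an A2A server: when a2a_interface=True and no A2A interface was passed explicitly, AgentOS builds one over its agents, teams, and workflows and mounts its router under the default "/a2a" prefix. A minimal sketch, assuming the usual agno import paths (Agent, OpenAIChat, AgentOS) and that AgentOS still exposes its FastAPI app via get_app():

from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.os import AgentOS

# Model class and id are illustrative; any agno model works here
agent = Agent(name="assistant", model=OpenAIChat(id="gpt-4o"))

# With a2a_interface=True, an A2A interface is appended at route-registration
# time unless one was already supplied via interfaces=[...]
agent_os = AgentOS(agents=[agent], a2a_interface=True)

app = agent_os.get_app()  # assumed accessor for the underlying FastAPI app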
agno/os/interfaces/__init__.py
CHANGED
@@ -0,0 +1 @@
+

agno/os/interfaces/a2a/a2a.py
ADDED

@@ -0,0 +1,42 @@
+"""Main class for the A2A app, used to expose an Agno Agent, Team, or Workflow in an A2A compatible format."""
+
+from typing import Optional
+
+from fastapi.routing import APIRouter
+from typing_extensions import List
+
+from agno.agent import Agent
+from agno.os.interfaces.a2a.router import attach_routes
+from agno.os.interfaces.base import BaseInterface
+from agno.team import Team
+from agno.workflow import Workflow
+
+
+class A2A(BaseInterface):
+    type = "a2a"
+
+    router: APIRouter
+
+    def __init__(
+        self,
+        agents: Optional[List[Agent]] = None,
+        teams: Optional[List[Team]] = None,
+        workflows: Optional[List[Workflow]] = None,
+        prefix: str = "/a2a",
+        tags: Optional[List[str]] = None,
+    ):
+        self.agents = agents
+        self.teams = teams
+        self.workflows = workflows
+        self.prefix = prefix
+        self.tags = tags or ["A2A"]
+
+        if not (self.agents or self.teams or self.workflows):
+            raise ValueError("Agents, Teams, or Workflows are required to setup the A2A interface.")
+
+    def get_router(self, **kwargs) -> APIRouter:
+        self.router = APIRouter(prefix=self.prefix, tags=self.tags)  # type: ignore
+
+        self.router = attach_routes(router=self.router, agents=self.agents, teams=self.teams, workflows=self.workflows)
+
+        return self.router
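When the flag is not flexible enough (custom mount prefix, custom OpenAPI tags, or exposing only a subset of agents), the interface can also be constructed directly and passed through interfaces=[...]. A sketch under the same import assumptions as above; the A2A constructor arguments are the ones shown in the diff:

from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.os import AgentOS
from agno.os.interfaces.a2a import A2A

# Model class and id are illustrative
agent = Agent(name="assistant", model=OpenAIChat(id="gpt-4o"))

# At least one of agents/teams/workflows is required, otherwise A2A raises ValueError
a2a = A2A(agents=[agent], prefix="/agent-to-agent", tags=["A2A"])

agent_os = AgentOS(agents=[agent], interfaces=[a2a])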