agno 2.1.1__py3-none-any.whl → 2.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +12 -0
- agno/db/base.py +8 -4
- agno/db/dynamo/dynamo.py +69 -17
- agno/db/firestore/firestore.py +65 -28
- agno/db/gcs_json/gcs_json_db.py +70 -17
- agno/db/in_memory/in_memory_db.py +85 -14
- agno/db/json/json_db.py +79 -15
- agno/db/mongo/mongo.py +27 -8
- agno/db/mysql/mysql.py +17 -3
- agno/db/postgres/postgres.py +21 -3
- agno/db/redis/redis.py +38 -11
- agno/db/singlestore/singlestore.py +14 -3
- agno/db/sqlite/sqlite.py +34 -46
- agno/knowledge/reader/field_labeled_csv_reader.py +294 -0
- agno/knowledge/reader/pdf_reader.py +28 -52
- agno/knowledge/reader/reader_factory.py +12 -0
- agno/memory/manager.py +12 -4
- agno/models/anthropic/claude.py +4 -1
- agno/models/aws/bedrock.py +52 -112
- agno/models/openrouter/openrouter.py +39 -1
- agno/models/vertexai/__init__.py +0 -0
- agno/models/vertexai/claude.py +74 -0
- agno/os/app.py +76 -32
- agno/os/interfaces/a2a/__init__.py +3 -0
- agno/os/interfaces/a2a/a2a.py +42 -0
- agno/os/interfaces/a2a/router.py +252 -0
- agno/os/interfaces/a2a/utils.py +924 -0
- agno/os/interfaces/agui/router.py +12 -0
- agno/os/mcp.py +3 -3
- agno/os/router.py +38 -8
- agno/os/routers/memory/memory.py +5 -3
- agno/os/routers/memory/schemas.py +1 -0
- agno/os/utils.py +37 -10
- agno/team/team.py +12 -0
- agno/tools/file.py +4 -2
- agno/tools/mcp.py +46 -1
- agno/utils/merge_dict.py +22 -1
- agno/utils/streamlit.py +1 -1
- agno/workflow/parallel.py +90 -14
- agno/workflow/step.py +30 -27
- agno/workflow/workflow.py +12 -6
- {agno-2.1.1.dist-info → agno-2.1.3.dist-info}/METADATA +16 -14
- {agno-2.1.1.dist-info → agno-2.1.3.dist-info}/RECORD +46 -39
- {agno-2.1.1.dist-info → agno-2.1.3.dist-info}/WHEEL +0 -0
- {agno-2.1.1.dist-info → agno-2.1.3.dist-info}/licenses/LICENSE +0 -0
- {agno-2.1.1.dist-info → agno-2.1.3.dist-info}/top_level.txt +0 -0
agno/os/interfaces/agui/router.py
CHANGED

@@ -34,12 +34,18 @@ async def run_agent(agent: Agent, run_input: RunAgentInput) -> AsyncIterator[BaseEvent]
     messages = convert_agui_messages_to_agno_messages(run_input.messages or [])
     yield RunStartedEvent(type=EventType.RUN_STARTED, thread_id=run_input.thread_id, run_id=run_id)

+    # Look for user_id in run_input.forwarded_props
+    user_id = None
+    if run_input.forwarded_props and isinstance(run_input.forwarded_props, dict):
+        user_id = run_input.forwarded_props.get("user_id")
+
     # Request streaming response from agent
     response_stream = agent.arun(
         input=messages,
         session_id=run_input.thread_id,
         stream=True,
         stream_intermediate_steps=True,
+        user_id=user_id,
     )

     # Stream the response content in AG-UI format

@@ -64,12 +70,18 @@ async def run_team(team: Team, input: RunAgentInput) -> AsyncIterator[BaseEvent]
     messages = convert_agui_messages_to_agno_messages(input.messages or [])
     yield RunStartedEvent(type=EventType.RUN_STARTED, thread_id=input.thread_id, run_id=run_id)

+    # Look for user_id in input.forwarded_props
+    user_id = None
+    if input.forwarded_props and isinstance(input.forwarded_props, dict):
+        user_id = input.forwarded_props.get("user_id")
+
     # Request streaming response from team
     response_stream = team.arun(
         input=messages,
         session_id=input.thread_id,
         stream=True,
         stream_intermediate_steps=True,
+        user_id=user_id,
     )

     # Stream the response content in AG-UI format
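A minimal sketch of the new extraction logic, using a plain dict in place of a real AG-UI RunAgentInput (the values are made up):

# Mirrors the user_id handling added to run_agent()/run_team() above
def extract_user_id(forwarded_props):
    user_id = None
    if forwarded_props and isinstance(forwarded_props, dict):
        user_id = forwarded_props.get("user_id")
    return user_id

print(extract_user_id({"user_id": "user_123"}))  # -> "user_123"
print(extract_user_id(None))                     # -> None (runs stay unattributed, as before)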
agno/os/mcp.py
CHANGED

@@ -78,21 +78,21 @@ def get_mcp_server(
         agent = get_agent_by_id(agent_id, os.agents)
         if agent is None:
             raise Exception(f"Agent {agent_id} not found")
-        return agent.
+        return await agent.arun(message)

     @mcp.tool(name="run_team", description="Run a team", tags={"core"})  # type: ignore
     async def run_team(team_id: str, message: str) -> TeamRunOutput:
         team = get_team_by_id(team_id, os.teams)
         if team is None:
             raise Exception(f"Team {team_id} not found")
-        return team.
+        return await team.arun(message)

     @mcp.tool(name="run_workflow", description="Run a workflow", tags={"core"})  # type: ignore
     async def run_workflow(workflow_id: str, message: str) -> WorkflowRunOutput:
         workflow = get_workflow_by_id(workflow_id, os.workflows)
         if workflow is None:
             raise Exception(f"Workflow {workflow_id} not found")
-        return workflow.
+        return await workflow.arun(message)

     # Session Management Tools
     @mcp.tool(name="get_sessions_for_agent", description="Get list of sessions for an agent", tags={"session"})  # type: ignore
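The removed lines are truncated in this diff view; the replacements await the async run methods so the MCP tools return actual run output rather than an unawaited value. A minimal sketch of the fixed pattern, with `agent` standing in for an already-configured Agent (hypothetical object, not part of the diff):

async def run_agent_tool(agent, message: str):
    # Awaiting arun() yields the run output instead of a coroutine object
    return await agent.arun(message)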
agno/os/router.py
CHANGED

@@ -1,4 +1,5 @@
 import json
+from itertools import chain
 from typing import TYPE_CHECKING, Any, AsyncGenerator, Callable, Dict, List, Optional, Union, cast
 from uuid import uuid4

@@ -643,7 +644,7 @@ def get_base_router(
         os_id=os.id or "Unnamed OS",
         description=os.description,
         available_models=os.config.available_models if os.config else [],
-        databases=
+        databases=list({db.id for db in chain(os.dbs.values(), os.knowledge_dbs.values())}),
         chat=os.config.chat if os.config else None,
         session=os._get_session_config(),
         memory=os._get_memory_config(),

@@ -784,19 +785,39 @@ def get_base_router(

     if files:
         for file in files:
-            if file.content_type in [
+            if file.content_type in [
+                "image/png",
+                "image/jpeg",
+                "image/jpg",
+                "image/gif",
+                "image/webp",
+                "image/bmp",
+                "image/tiff",
+                "image/tif",
+                "image/avif",
+            ]:
                 try:
                     base64_image = process_image(file)
                     base64_images.append(base64_image)
                 except Exception as e:
                     log_error(f"Error processing image {file.filename}: {e}")
                     continue
-            elif file.content_type in [
+            elif file.content_type in [
+                "audio/wav",
+                "audio/wave",
+                "audio/mp3",
+                "audio/mpeg",
+                "audio/ogg",
+                "audio/mp4",
+                "audio/m4a",
+                "audio/aac",
+                "audio/flac",
+            ]:
                 try:
-
-                    base64_audios.append(
+                    audio = process_audio(file)
+                    base64_audios.append(audio)
                 except Exception as e:
-                    log_error(f"Error processing audio {file.filename}: {e}")
+                    log_error(f"Error processing audio {file.filename} with content type {file.content_type}: {e}")
                     continue
             elif file.content_type in [
                 "video/x-flv",

@@ -819,10 +840,19 @@ def get_base_router(
                     continue
             elif file.content_type in [
                 "application/pdf",
-                "
+                "application/json",
+                "application/x-javascript",
                 "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+                "text/javascript",
+                "application/x-python",
+                "text/x-python",
                 "text/plain",
-                "
+                "text/html",
+                "text/css",
+                "text/md",
+                "text/csv",
+                "text/xml",
+                "text/rtf",
             ]:
                 # Process document files
                 try:
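The `databases` field is now built by deduplicating database ids across the regular and knowledge databases. A small self-contained sketch of that pattern, with stand-in objects instead of real Agno Db instances:

from itertools import chain

class Db:
    def __init__(self, id):
        self.id = id

dbs = {"main": Db("pg-main")}
knowledge_dbs = {"kb": Db("pg-main"), "vectors": Db("pg-vectors")}

# Same set-comprehension-over-chain pattern as the patched line above
database_ids = list({db.id for db in chain(dbs.values(), knowledge_dbs.values())})
print(sorted(database_ids))  # ['pg-main', 'pg-vectors'] – shared ids appear once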
agno/os/routers/memory/memory.py
CHANGED

@@ -120,10 +120,11 @@ def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
     )
     async def delete_memory(
         memory_id: str = Path(description="Memory ID to delete"),
+        user_id: Optional[str] = Query(default=None, description="User ID to delete memory for"),
         db_id: Optional[str] = Query(default=None, description="Database ID to use for deletion"),
     ) -> None:
         db = get_db(dbs, db_id)
-        db.delete_user_memory(memory_id=memory_id)
+        db.delete_user_memory(memory_id=memory_id, user_id=user_id)

     @router.delete(
         "/memories",

@@ -145,7 +146,7 @@ def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
         db_id: Optional[str] = Query(default=None, description="Database ID to use for deletion"),
     ) -> None:
         db = get_db(dbs, db_id)
-        db.delete_user_memories(memory_ids=request.memory_ids)
+        db.delete_user_memories(memory_ids=request.memory_ids, user_id=request.user_id)

     @router.get(
         "/memories",

@@ -249,10 +250,11 @@ def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
     )
     async def get_memory(
         memory_id: str = Path(description="Memory ID to retrieve"),
+        user_id: Optional[str] = Query(default=None, description="User ID to query memory for"),
         db_id: Optional[str] = Query(default=None, description="Database ID to query memory from"),
     ) -> UserMemorySchema:
         db = get_db(dbs, db_id)
-        user_memory = db.get_user_memory(memory_id=memory_id, deserialize=False)
+        user_memory = db.get_user_memory(memory_id=memory_id, user_id=user_id, deserialize=False)
         if not user_memory:
            raise HTTPException(status_code=404, detail=f"Memory with ID {memory_id} not found")
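The single-memory endpoints now accept an optional user_id query parameter that is forwarded to the database layer. A hedged client-side sketch; the base URL, route prefix, and ids below are illustrative and may differ in your AgentOS deployment:

import requests

base = "http://localhost:7777"  # hypothetical AgentOS address

# Fetch a memory scoped to a specific user (user_id is optional, new in 2.1.3)
requests.get(f"{base}/memories/mem_123", params={"user_id": "user_123", "db_id": "db_main"})

# Delete a memory for that user only
requests.delete(f"{base}/memories/mem_123", params={"user_id": "user_123"})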
agno/os/utils.py
CHANGED

@@ -93,18 +93,41 @@ def get_session_name(session: Dict[str, Any]) -> str:

     # For teams, identify the first Team run and avoid using the first member's run
     if session.get("session_type") == "team":
-        run =
+        run = None
+        for r in runs:
+            # If agent_id is not present, it's a team run
+            if not r.get("agent_id"):
+                run = r
+                break
+        # Fallback to first run if no team run found
+        if run is None and runs:
+            run = runs[0]

-    # For workflows, pass along the first step_executor_run
     elif session.get("session_type") == "workflow":
         try:
-
+            workflow_run = runs[0]
+            workflow_input = workflow_run.get("input")
+            if isinstance(workflow_input, str):
+                return workflow_input
+            elif isinstance(workflow_input, dict):
+                try:
+                    import json
+
+                    return json.dumps(workflow_input)
+                except (TypeError, ValueError):
+                    pass
+
+            workflow_name = session.get("workflow_data", {}).get("name")
+            return f"New {workflow_name} Session" if workflow_name else ""
         except (KeyError, IndexError, TypeError):
             return ""

     # For agents, use the first run
     else:
-        run = runs[0]
+        run = runs[0] if runs else None
+
+    if run is None:
+        return ""

     if not isinstance(run, dict):
         run = run.to_dict()

@@ -150,13 +173,17 @@ def process_document(file: UploadFile) -> Optional[FileMedia]:
     return None


-def extract_format(file: UploadFile):
-
+def extract_format(file: UploadFile) -> Optional[str]:
+    """Extract the File format from file name or content_type."""
+    # Get the format from the filename
     if file.filename and "." in file.filename:
-
-
-
-
+        return file.filename.split(".")[-1].lower()
+
+    # Fallback to the file content_type
+    if file.content_type:
+        return file.content_type.strip().split("/")[-1]
+
+    return None


 def format_tools(agent_tools: List[Union[Dict[str, Any], Toolkit, Function, Callable]]):
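A self-contained sketch of the extract_format fallback order, with a minimal stand-in for FastAPI's UploadFile (not the real class):

class FakeUpload:
    def __init__(self, filename=None, content_type=None):
        self.filename = filename
        self.content_type = content_type

def extract_format(file):
    # Same logic as the patched helper above
    if file.filename and "." in file.filename:
        return file.filename.split(".")[-1].lower()
    if file.content_type:
        return file.content_type.strip().split("/")[-1]
    return None

print(extract_format(FakeUpload(filename="Report.PDF")))                       # 'pdf'
print(extract_format(FakeUpload(filename="notes", content_type="text/csv")))   # 'csv'
print(extract_format(FakeUpload()))                                            # None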
agno/team/team.py
CHANGED

@@ -4035,6 +4035,12 @@ class Team:
             log_warning("Reasoning error. Reasoning response is empty, continuing regular session...")
             break

+        if isinstance(reasoning_agent_response.content, str):
+            log_warning(
+                "Reasoning error. Content is a string, not structured output. Continuing regular session..."
+            )
+            break
+
         if reasoning_agent_response.content.reasoning_steps is None:
             log_warning("Reasoning error. Reasoning steps are empty, continuing regular session...")
             break

@@ -4261,6 +4267,12 @@ class Team:
             log_warning("Reasoning error. Reasoning response is empty, continuing regular session...")
             break

+        if isinstance(reasoning_agent_response.content, str):
+            log_warning(
+                "Reasoning error. Content is a string, not structured output. Continuing regular session..."
+            )
+            break
+
         if reasoning_agent_response.content.reasoning_steps is None:
             log_warning("Reasoning error. Reasoning steps are empty, continuing regular session...")
             break
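The new guard avoids an AttributeError when the reasoning agent returns plain text instead of a structured object exposing reasoning_steps. A small illustration with a stand-in response object (not an Agno class):

class FakeResponse:
    def __init__(self, content):
        self.content = content

resp = FakeResponse(content="I could not produce structured reasoning.")

# Without the guard, resp.content.reasoning_steps would raise:
# AttributeError: 'str' object has no attribute 'reasoning_steps'
if isinstance(resp.content, str):
    print("Reasoning content is a plain string; falling back to the regular session.")
else:
    steps = resp.content.reasoning_steps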
agno/tools/file.py
CHANGED

@@ -75,7 +75,9 @@ class FileTools(Toolkit):
         """
         try:
             log_info(f"Reading files in : {self.base_dir}")
-            return json.dumps(
+            return json.dumps(
+                [str(file_path.relative_to(self.base_dir)) for file_path in self.base_dir.iterdir()], indent=4
+            )
         except Exception as e:
             log_error(f"Error reading files: {e}")
             return f"Error reading files: {e}"

@@ -93,7 +95,7 @@ class FileTools(Toolkit):
         log_debug(f"Searching files in {self.base_dir} with pattern {pattern}")
         matching_files = list(self.base_dir.glob(pattern))

-        file_paths = [str(file_path) for file_path in matching_files]
+        file_paths = [str(file_path.relative_to(self.base_dir)) for file_path in matching_files]

         result = {
             "pattern": pattern,
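FileTools now reports paths relative to base_dir rather than absolute host paths. A quick sketch of the pathlib pattern (the directory and file names are made up):

from pathlib import Path

base_dir = Path("/tmp/agent-files")  # hypothetical FileTools base_dir
matching_files = [base_dir / "notes/todo.txt", base_dir / "data.csv"]

# Before: absolute paths like '/tmp/agent-files/data.csv' leaked the host layout
# After: paths are reported relative to base_dir
file_paths = [str(p.relative_to(base_dir)) for p in matching_files]
print(file_paths)  # ['notes/todo.txt', 'data.csv']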
agno/tools/mcp.py
CHANGED

@@ -22,6 +22,8 @@ except (ImportError, ModuleNotFoundError):

 def _prepare_command(command: str) -> list[str]:
     """Sanitize a command and split it into parts before using it to run a MCP server."""
+    import os
+    import shutil
     from shlex import split

     # Block dangerous characters

@@ -55,10 +57,53 @@ def _prepare_command(command: str) -> list[str]:
     }

     executable = parts[0].split("/")[-1]
+
+    # Check if it's a relative path starting with ./ or ../
+    if executable.startswith("./") or executable.startswith("../"):
+        # Allow relative paths to binaries
+        return parts
+
+    # Check if it's an absolute path to a binary
+    if executable.startswith("/") and os.path.isfile(executable):
+        # Allow absolute paths to existing files
+        return parts
+
+    # Check if it's a binary in current directory without ./
+    if "/" not in executable and os.path.isfile(executable):
+        # Allow binaries in current directory
+        return parts
+
+    # Check if it's a binary in PATH
+    if shutil.which(executable):
+        return parts
+
     if executable not in ALLOWED_COMMANDS:
         raise ValueError(f"MCP command needs to use one of the following executables: {ALLOWED_COMMANDS}")

-
+    first_part = parts[0]
+    executable = first_part.split("/")[-1]
+
+    # Allow known commands
+    if executable in ALLOWED_COMMANDS:
+        return parts
+
+    # Allow relative paths to custom binaries
+    if first_part.startswith(("./", "../")):
+        return parts
+
+    # Allow absolute paths to existing files
+    if first_part.startswith("/") and os.path.isfile(first_part):
+        return parts
+
+    # Allow binaries in current directory without ./
+    if "/" not in first_part and os.path.isfile(first_part):
+        return parts
+
+    # Allow binaries in PATH
+    if shutil.which(first_part):
+        return parts
+
+    raise ValueError(f"MCP command needs to use one of the following executables: {ALLOWED_COMMANDS}")


 @dataclass
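The key relaxation is that an executable resolvable on the filesystem or on PATH is now accepted even if it is not in the hard-coded allow-list. A sketch of that check in isolation (the executable name is hypothetical; the actual result depends on your machine):

import shutil

executable = "deno"  # hypothetical MCP server launcher that may or may not be installed
if shutil.which(executable):
    print(f"{executable} resolves to {shutil.which(executable)}; the command is allowed")
else:
    print(f"{executable} is not on PATH; validation falls back to the allow-list check")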
agno/utils/merge_dict.py
CHANGED

@@ -1,4 +1,4 @@
-from typing import Any, Dict
+from typing import Any, Dict, List


 def merge_dictionaries(a: Dict[str, Any], b: Dict[str, Any]) -> None:

@@ -18,3 +18,24 @@ def merge_dictionaries(a: Dict[str, Any], b: Dict[str, Any]) -> None:
             merge_dictionaries(a[key], b[key])
         else:
             a[key] = b[key]
+
+
+def merge_parallel_session_states(original_state: Dict[str, Any], modified_states: List[Dict[str, Any]]) -> None:
+    """
+    Smart merge for parallel session states that only applies actual changes.
+    This prevents parallel steps from overwriting each other's changes.
+    """
+    if not original_state or not modified_states:
+        return
+
+    # Collect all actual changes (keys where value differs from original)
+    all_changes = {}
+    for modified_state in modified_states:
+        if modified_state:
+            for key, value in modified_state.items():
+                if key not in original_state or original_state[key] != value:
+                    all_changes[key] = value
+
+    # Apply all collected changes to the original state
+    for key, value in all_changes.items():
+        original_state[key] = value