agno 2.3.8__py3-none-any.whl → 2.3.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +134 -94
- agno/db/mysql/__init__.py +2 -1
- agno/db/mysql/async_mysql.py +2888 -0
- agno/db/mysql/mysql.py +17 -8
- agno/db/mysql/utils.py +139 -6
- agno/db/postgres/async_postgres.py +10 -5
- agno/db/postgres/postgres.py +7 -2
- agno/db/schemas/evals.py +1 -0
- agno/db/singlestore/singlestore.py +5 -1
- agno/db/sqlite/async_sqlite.py +3 -3
- agno/eval/__init__.py +10 -0
- agno/eval/accuracy.py +11 -8
- agno/eval/agent_as_judge.py +861 -0
- agno/eval/base.py +29 -0
- agno/eval/utils.py +2 -1
- agno/exceptions.py +7 -0
- agno/knowledge/embedder/openai.py +8 -8
- agno/knowledge/knowledge.py +1142 -176
- agno/media.py +22 -6
- agno/models/aws/claude.py +8 -7
- agno/models/base.py +61 -2
- agno/models/deepseek/deepseek.py +67 -0
- agno/models/google/gemini.py +134 -51
- agno/models/google/utils.py +22 -0
- agno/models/message.py +5 -0
- agno/models/openai/chat.py +4 -0
- agno/os/app.py +64 -74
- agno/os/interfaces/a2a/router.py +3 -4
- agno/os/interfaces/agui/router.py +2 -0
- agno/os/router.py +3 -1607
- agno/os/routers/agents/__init__.py +3 -0
- agno/os/routers/agents/router.py +581 -0
- agno/os/routers/agents/schema.py +261 -0
- agno/os/routers/evals/evals.py +26 -6
- agno/os/routers/evals/schemas.py +34 -2
- agno/os/routers/evals/utils.py +77 -18
- agno/os/routers/knowledge/knowledge.py +1 -1
- agno/os/routers/teams/__init__.py +3 -0
- agno/os/routers/teams/router.py +496 -0
- agno/os/routers/teams/schema.py +257 -0
- agno/os/routers/workflows/__init__.py +3 -0
- agno/os/routers/workflows/router.py +545 -0
- agno/os/routers/workflows/schema.py +75 -0
- agno/os/schema.py +1 -559
- agno/os/utils.py +139 -2
- agno/team/team.py +87 -24
- agno/tools/file_generation.py +12 -6
- agno/tools/firecrawl.py +15 -7
- agno/tools/function.py +37 -23
- agno/tools/shopify.py +1519 -0
- agno/tools/spotify.py +2 -5
- agno/utils/hooks.py +64 -5
- agno/utils/http.py +2 -2
- agno/utils/media.py +11 -1
- agno/utils/print_response/agent.py +8 -0
- agno/utils/print_response/team.py +8 -0
- agno/vectordb/pgvector/pgvector.py +88 -51
- agno/workflow/parallel.py +5 -3
- agno/workflow/step.py +14 -2
- agno/workflow/types.py +38 -2
- agno/workflow/workflow.py +12 -4
- {agno-2.3.8.dist-info → agno-2.3.10.dist-info}/METADATA +7 -2
- {agno-2.3.8.dist-info → agno-2.3.10.dist-info}/RECORD +66 -52
- {agno-2.3.8.dist-info → agno-2.3.10.dist-info}/WHEEL +0 -0
- {agno-2.3.8.dist-info → agno-2.3.10.dist-info}/licenses/LICENSE +0 -0
- {agno-2.3.8.dist-info → agno-2.3.10.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,257 @@
|
|
|
1
|
+
from typing import Any, Dict, List, Optional, Union
|
|
2
|
+
from uuid import uuid4
|
|
3
|
+
|
|
4
|
+
from pydantic import BaseModel
|
|
5
|
+
|
|
6
|
+
from agno.agent import Agent
|
|
7
|
+
from agno.os.routers.agents.schema import AgentResponse
|
|
8
|
+
from agno.os.schema import ModelResponse
|
|
9
|
+
from agno.os.utils import (
|
|
10
|
+
format_team_tools,
|
|
11
|
+
get_team_input_schema_dict,
|
|
12
|
+
)
|
|
13
|
+
from agno.run import RunContext
|
|
14
|
+
from agno.run.team import TeamRunOutput
|
|
15
|
+
from agno.session import TeamSession
|
|
16
|
+
from agno.team.team import Team
|
|
17
|
+
from agno.utils.agent import aexecute_instructions, aexecute_system_message
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class TeamResponse(BaseModel):
    """Serializable view of a `Team` returned by the AgentOS teams router.

    Each configuration section (tools, sessions, knowledge, memory, ...) is a
    plain dict filtered down to the values that differ from the framework
    defaults, so API consumers only see configuration the user actually set.
    """

    id: Optional[str] = None
    name: Optional[str] = None
    db_id: Optional[str] = None
    description: Optional[str] = None
    model: Optional[ModelResponse] = None
    tools: Optional[Dict[str, Any]] = None
    sessions: Optional[Dict[str, Any]] = None
    knowledge: Optional[Dict[str, Any]] = None
    memory: Optional[Dict[str, Any]] = None
    reasoning: Optional[Dict[str, Any]] = None
    default_tools: Optional[Dict[str, Any]] = None
    system_message: Optional[Dict[str, Any]] = None
    response_settings: Optional[Dict[str, Any]] = None
    introduction: Optional[str] = None
    streaming: Optional[Dict[str, Any]] = None
    # Recursive: members may themselves be agents or nested teams.
    members: Optional[List[Union[AgentResponse, "TeamResponse"]]] = None
    metadata: Optional[Dict[str, Any]] = None
    input_schema: Optional[Dict[str, Any]] = None

    @classmethod
    async def from_team(cls, team: Team) -> "TeamResponse":
        """Build a response object from a live `Team`.

        Resolves the team's tools (async), executes callable instructions and
        system messages, and recursively serializes member agents and teams.

        Args:
            team: The team to serialize.

        Returns:
            A populated ``TeamResponse`` (or subclass, via ``cls``).
        """

        def filter_meaningful_config(d: Dict[str, Any], defaults: Dict[str, Any]) -> Optional[Dict[str, Any]]:
            """Filter out fields that match their default values, keeping only meaningful user configurations"""
            filtered: Dict[str, Any] = {}
            for key, value in d.items():
                if value is None:
                    continue
                # Skip if value matches the default exactly
                if key in defaults and value == defaults[key]:
                    continue
                # Keep non-default values
                filtered[key] = value
            return filtered if filtered else None

        def model_summary(model: Any) -> Dict[str, Any]:
            """Summarize a model object (memory/reasoning/parser) as a plain dict."""
            return ModelResponse(
                name=model.name,
                model=model.id,
                provider=model.provider,
            ).model_dump()

        # Define default values for filtering (similar to agent defaults)
        team_defaults = {
            # Sessions defaults
            "add_history_to_context": False,
            "num_history_runs": 3,
            "enable_session_summaries": False,
            "cache_session": False,
            # Knowledge defaults
            "add_references": False,
            "references_format": "json",
            "enable_agentic_knowledge_filters": False,
            # Memory defaults
            "enable_agentic_memory": False,
            "enable_user_memories": False,
            # Reasoning defaults
            "reasoning": False,
            "reasoning_min_steps": 1,
            "reasoning_max_steps": 10,
            # Default tools defaults
            "search_knowledge": True,
            "read_chat_history": False,
            "get_member_information_tool": False,
            # System message defaults
            "system_message_role": "system",
            "markdown": False,
            "add_datetime_to_context": False,
            "add_location_to_context": False,
            "resolve_in_context": True,
            # Response settings defaults
            "parse_response": True,
            "use_json_mode": False,
            # Streaming defaults
            "stream_events": False,
            "stream_intermediate_steps": False,
            "stream_member_events": False,
        }

        # Tool resolution requires a throwaway run/session context; these IDs
        # are never persisted.
        run_id = str(uuid4())
        session_id = str(uuid4())
        team_tools = team._determine_tools_for_model(
            model=team.model,  # type: ignore
            session=TeamSession(session_id=session_id, session_data={}),
            run_response=TeamRunOutput(run_id=run_id),
            run_context=RunContext(run_id=run_id, session_id=session_id, session_state={}),
            async_mode=True,
            team_run_context={},
            check_mcp_tools=False,
        )
        formatted_tools = format_team_tools(team_tools) if team_tools else None

        session_table = team.db.session_table_name if team.db else None
        knowledge_table = team.db.knowledge_table_name if team.db and team.knowledge else None

        tools_info = {
            "tools": formatted_tools,
            "tool_call_limit": team.tool_call_limit,
            "tool_choice": team.tool_choice,
        }

        sessions_info = {
            "session_table": session_table,
            "add_history_to_context": team.add_history_to_context,
            "enable_session_summaries": team.enable_session_summaries,
            "num_history_runs": team.num_history_runs,
            "cache_session": team.cache_session,
        }

        knowledge_info = {
            "knowledge_table": knowledge_table,
            "enable_agentic_knowledge_filters": team.enable_agentic_knowledge_filters,
            "knowledge_filters": team.knowledge_filters,
            "references_format": team.references_format,
        }

        # Memory section exists only when a memory manager is configured.
        memory_info: Optional[Dict[str, Any]] = None
        if team.memory_manager is not None:
            memory_info = {
                "enable_agentic_memory": team.enable_agentic_memory,
                "enable_user_memories": team.enable_user_memories,
                "metadata": team.metadata,
                "memory_table": team.db.memory_table_name if team.db and team.enable_user_memories else None,
            }

            if team.memory_manager.model is not None:
                memory_info["model"] = model_summary(team.memory_manager.model)

        reasoning_info: Dict[str, Any] = {
            "reasoning": team.reasoning,
            "reasoning_agent_id": team.reasoning_agent.id if team.reasoning_agent else None,
            "reasoning_min_steps": team.reasoning_min_steps,
            "reasoning_max_steps": team.reasoning_max_steps,
        }

        if team.reasoning_model:
            reasoning_info["reasoning_model"] = model_summary(team.reasoning_model)

        default_tools_info = {
            "search_knowledge": team.search_knowledge,
            "read_chat_history": team.read_chat_history,
            "get_member_information_tool": team.get_member_information_tool,
        }

        # Instructions / system message may be callables that must be awaited.
        team_instructions = team.instructions if team.instructions else None
        if team_instructions and callable(team_instructions):
            team_instructions = await aexecute_instructions(instructions=team_instructions, agent=team, team=team)

        team_system_message = team.system_message if team.system_message else None
        if team_system_message and callable(team_system_message):
            team_system_message = await aexecute_system_message(
                system_message=team_system_message, agent=team, team=team
            )

        system_message_info = {
            "system_message": team_system_message,
            "system_message_role": team.system_message_role,
            "description": team.description,
            "instructions": team_instructions,
            "expected_output": team.expected_output,
            "additional_context": team.additional_context,
            "markdown": team.markdown,
            "add_datetime_to_context": team.add_datetime_to_context,
            "add_location_to_context": team.add_location_to_context,
            "resolve_in_context": team.resolve_in_context,
        }

        response_settings_info: Dict[str, Any] = {
            "output_schema_name": team.output_schema.__name__ if team.output_schema else None,
            "parser_model_prompt": team.parser_model_prompt,
            "parse_response": team.parse_response,
            "use_json_mode": team.use_json_mode,
        }

        if team.parser_model:
            response_settings_info["parser_model"] = model_summary(team.parser_model)

        streaming_info = {
            "stream": team.stream,
            "stream_events": team.stream_events,
            "stream_intermediate_steps": team.stream_intermediate_steps,
            "stream_member_events": team.stream_member_events,
        }

        # Build team model only if it has at least one non-null field
        _team_model_data: Dict[str, Any] = {}
        if team.model and team.model.name is not None:
            _team_model_data["name"] = team.model.name
        if team.model and team.model.id is not None:
            _team_model_data["model"] = team.model.id
        if team.model and team.model.provider is not None:
            _team_model_data["provider"] = team.model.provider

        # Recursively serialize members; an entry is either an agent or a
        # nested team, never both.
        members: List[Union[AgentResponse, TeamResponse]] = []
        for member in team.members:
            if isinstance(member, Agent):
                members.append(await AgentResponse.from_agent(member))
            elif isinstance(member, Team):
                members.append(await cls.from_team(member))

        return cls(
            id=team.id,
            name=team.name,
            db_id=team.db.id if team.db else None,
            model=ModelResponse(**_team_model_data) if _team_model_data else None,
            tools=filter_meaningful_config(tools_info, {}),
            sessions=filter_meaningful_config(sessions_info, team_defaults),
            knowledge=filter_meaningful_config(knowledge_info, team_defaults),
            memory=filter_meaningful_config(memory_info, team_defaults) if memory_info else None,
            reasoning=filter_meaningful_config(reasoning_info, team_defaults),
            default_tools=filter_meaningful_config(default_tools_info, team_defaults),
            system_message=filter_meaningful_config(system_message_info, team_defaults),
            response_settings=filter_meaningful_config(response_settings_info, team_defaults),
            introduction=team.introduction,
            streaming=filter_meaningful_config(streaming_info, team_defaults),
            members=members if members else None,
            metadata=team.metadata,
            input_schema=get_team_input_schema_dict(team),
        )
|