agno 2.1.3__py3-none-any.whl → 2.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +1779 -577
- agno/db/async_postgres/__init__.py +3 -0
- agno/db/async_postgres/async_postgres.py +1668 -0
- agno/db/async_postgres/schemas.py +124 -0
- agno/db/async_postgres/utils.py +289 -0
- agno/db/base.py +237 -2
- agno/db/dynamo/dynamo.py +10 -8
- agno/db/dynamo/schemas.py +1 -10
- agno/db/dynamo/utils.py +2 -2
- agno/db/firestore/firestore.py +2 -2
- agno/db/firestore/utils.py +4 -2
- agno/db/gcs_json/gcs_json_db.py +2 -2
- agno/db/in_memory/in_memory_db.py +2 -2
- agno/db/json/json_db.py +2 -2
- agno/db/migrations/v1_to_v2.py +30 -13
- agno/db/mongo/mongo.py +18 -6
- agno/db/mysql/mysql.py +35 -13
- agno/db/postgres/postgres.py +29 -6
- agno/db/redis/redis.py +2 -2
- agno/db/singlestore/singlestore.py +2 -2
- agno/db/sqlite/sqlite.py +34 -12
- agno/db/sqlite/utils.py +8 -3
- agno/eval/accuracy.py +50 -43
- agno/eval/performance.py +6 -3
- agno/eval/reliability.py +6 -3
- agno/eval/utils.py +33 -16
- agno/exceptions.py +8 -2
- agno/knowledge/embedder/fastembed.py +1 -1
- agno/knowledge/knowledge.py +260 -46
- agno/knowledge/reader/pdf_reader.py +4 -6
- agno/knowledge/reader/reader_factory.py +2 -3
- agno/memory/manager.py +241 -33
- agno/models/anthropic/claude.py +37 -0
- agno/os/app.py +15 -10
- agno/os/interfaces/a2a/router.py +3 -5
- agno/os/interfaces/agui/router.py +4 -1
- agno/os/interfaces/agui/utils.py +33 -6
- agno/os/interfaces/slack/router.py +2 -4
- agno/os/mcp.py +98 -41
- agno/os/router.py +23 -0
- agno/os/routers/evals/evals.py +52 -20
- agno/os/routers/evals/utils.py +14 -14
- agno/os/routers/knowledge/knowledge.py +130 -9
- agno/os/routers/knowledge/schemas.py +57 -0
- agno/os/routers/memory/memory.py +116 -44
- agno/os/routers/metrics/metrics.py +16 -6
- agno/os/routers/session/session.py +65 -22
- agno/os/schema.py +38 -0
- agno/os/utils.py +69 -13
- agno/reasoning/anthropic.py +80 -0
- agno/reasoning/gemini.py +73 -0
- agno/reasoning/openai.py +5 -0
- agno/reasoning/vertexai.py +76 -0
- agno/session/workflow.py +69 -1
- agno/team/team.py +934 -241
- agno/tools/function.py +36 -18
- agno/tools/google_drive.py +270 -0
- agno/tools/googlesheets.py +20 -5
- agno/tools/mcp_toolbox.py +3 -3
- agno/tools/scrapegraph.py +1 -1
- agno/utils/models/claude.py +3 -1
- agno/utils/print_response/workflow.py +112 -12
- agno/utils/streamlit.py +1 -1
- agno/vectordb/base.py +22 -1
- agno/vectordb/cassandra/cassandra.py +9 -0
- agno/vectordb/chroma/chromadb.py +26 -6
- agno/vectordb/clickhouse/clickhousedb.py +9 -1
- agno/vectordb/couchbase/couchbase.py +11 -0
- agno/vectordb/lancedb/lance_db.py +20 -0
- agno/vectordb/langchaindb/langchaindb.py +11 -0
- agno/vectordb/lightrag/lightrag.py +9 -0
- agno/vectordb/llamaindex/llamaindexdb.py +15 -1
- agno/vectordb/milvus/milvus.py +23 -0
- agno/vectordb/mongodb/mongodb.py +22 -0
- agno/vectordb/pgvector/pgvector.py +19 -0
- agno/vectordb/pineconedb/pineconedb.py +35 -4
- agno/vectordb/qdrant/qdrant.py +24 -0
- agno/vectordb/singlestore/singlestore.py +25 -17
- agno/vectordb/surrealdb/surrealdb.py +18 -1
- agno/vectordb/upstashdb/upstashdb.py +26 -1
- agno/vectordb/weaviate/weaviate.py +18 -0
- agno/workflow/condition.py +29 -0
- agno/workflow/loop.py +29 -0
- agno/workflow/parallel.py +141 -113
- agno/workflow/router.py +29 -0
- agno/workflow/step.py +146 -25
- agno/workflow/steps.py +29 -0
- agno/workflow/types.py +26 -1
- agno/workflow/workflow.py +507 -22
- {agno-2.1.3.dist-info → agno-2.1.5.dist-info}/METADATA +100 -41
- {agno-2.1.3.dist-info → agno-2.1.5.dist-info}/RECORD +94 -86
- {agno-2.1.3.dist-info → agno-2.1.5.dist-info}/WHEEL +0 -0
- {agno-2.1.3.dist-info → agno-2.1.5.dist-info}/licenses/LICENSE +0 -0
- {agno-2.1.3.dist-info → agno-2.1.5.dist-info}/top_level.txt +0 -0
agno/team/team.py
CHANGED
|
@@ -30,7 +30,7 @@ from uuid import uuid4
|
|
|
30
30
|
from pydantic import BaseModel
|
|
31
31
|
|
|
32
32
|
from agno.agent import Agent
|
|
33
|
-
from agno.db.base import BaseDb, SessionType, UserMemory
|
|
33
|
+
from agno.db.base import AsyncBaseDb, BaseDb, SessionType, UserMemory
|
|
34
34
|
from agno.exceptions import (
|
|
35
35
|
InputCheckError,
|
|
36
36
|
ModelProviderError,
|
|
@@ -214,7 +214,7 @@ class Team:
|
|
|
214
214
|
|
|
215
215
|
# --- Database ---
|
|
216
216
|
# Database to use for this agent
|
|
217
|
-
db: Optional[BaseDb] = None
|
|
217
|
+
db: Optional[Union[BaseDb, AsyncBaseDb]] = None
|
|
218
218
|
|
|
219
219
|
# Memory manager to use for this agent
|
|
220
220
|
memory_manager: Optional[MemoryManager] = None
|
|
@@ -259,6 +259,10 @@ class Team:
|
|
|
259
259
|
send_media_to_model: bool = True
|
|
260
260
|
# If True, store media in run output
|
|
261
261
|
store_media: bool = True
|
|
262
|
+
# If True, store tool results in run output
|
|
263
|
+
store_tool_results: bool = True
|
|
264
|
+
# If True, store history messages in run output
|
|
265
|
+
store_history_messages: bool = True
|
|
262
266
|
|
|
263
267
|
# --- Team Tools ---
|
|
264
268
|
# A list of tools provided to the Model.
|
|
@@ -418,6 +422,8 @@ class Team:
|
|
|
418
422
|
search_knowledge: bool = True,
|
|
419
423
|
read_team_history: bool = False,
|
|
420
424
|
store_media: bool = True,
|
|
425
|
+
store_tool_results: bool = True,
|
|
426
|
+
store_history_messages: bool = True,
|
|
421
427
|
send_media_to_model: bool = True,
|
|
422
428
|
tools: Optional[List[Union[Toolkit, Callable, Function, Dict]]] = None,
|
|
423
429
|
tool_call_limit: Optional[int] = None,
|
|
@@ -433,7 +439,7 @@ class Team:
|
|
|
433
439
|
output_model_prompt: Optional[str] = None,
|
|
434
440
|
use_json_mode: bool = False,
|
|
435
441
|
parse_response: bool = True,
|
|
436
|
-
db: Optional[BaseDb] = None,
|
|
442
|
+
db: Optional[Union[BaseDb, AsyncBaseDb]] = None,
|
|
437
443
|
enable_agentic_memory: bool = False,
|
|
438
444
|
enable_user_memories: bool = False,
|
|
439
445
|
add_memories_to_context: Optional[bool] = None,
|
|
@@ -518,6 +524,8 @@ class Team:
|
|
|
518
524
|
self.read_team_history = read_team_history
|
|
519
525
|
|
|
520
526
|
self.store_media = store_media
|
|
527
|
+
self.store_tool_results = store_tool_results
|
|
528
|
+
self.store_history_messages = store_history_messages
|
|
521
529
|
self.send_media_to_model = send_media_to_model
|
|
522
530
|
|
|
523
531
|
self.tools = tools
|
|
@@ -792,6 +800,10 @@ class Team:
|
|
|
792
800
|
|
|
793
801
|
return session_id, user_id, session_state # type: ignore
|
|
794
802
|
|
|
803
|
+
def _has_async_db(self) -> bool:
|
|
804
|
+
"""Return True if the db the team is equipped with is an Async implementation"""
|
|
805
|
+
return self.db is not None and isinstance(self.db, AsyncBaseDb)
|
|
806
|
+
|
|
795
807
|
def initialize_team(self, debug_mode: Optional[bool] = None) -> None:
|
|
796
808
|
# Make sure for the team, we are using the team logger
|
|
797
809
|
use_team_logger()
|
|
@@ -1205,7 +1217,11 @@ class Team:
|
|
|
1205
1217
|
)
|
|
1206
1218
|
deque(response_iterator, maxlen=0)
|
|
1207
1219
|
|
|
1208
|
-
# 10.
|
|
1220
|
+
# 10. Scrub the stored run based on storage flags
|
|
1221
|
+
if self._scrub_run_output_for_storage(run_response):
|
|
1222
|
+
session.upsert_run(run_response=run_response)
|
|
1223
|
+
|
|
1224
|
+
# 11. Save session to storage
|
|
1209
1225
|
self.save_session(session=session)
|
|
1210
1226
|
|
|
1211
1227
|
# Log Team Telemetry
|
|
@@ -1232,7 +1248,6 @@ class Team:
|
|
|
1232
1248
|
dependencies: Optional[Dict[str, Any]] = None,
|
|
1233
1249
|
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
|
|
1234
1250
|
stream_intermediate_steps: bool = False,
|
|
1235
|
-
workflow_context: Optional[Dict] = None,
|
|
1236
1251
|
yield_run_response: bool = False,
|
|
1237
1252
|
debug_mode: Optional[bool] = None,
|
|
1238
1253
|
**kwargs: Any,
|
|
@@ -1244,10 +1259,11 @@ class Team:
|
|
|
1244
1259
|
2. Prepare run messages
|
|
1245
1260
|
3. Reason about the task(s) if reasoning is enabled
|
|
1246
1261
|
4. Get a response from the model
|
|
1247
|
-
5. Add
|
|
1248
|
-
6.
|
|
1249
|
-
7.
|
|
1250
|
-
8.
|
|
1262
|
+
5. Add the run to Team Session
|
|
1263
|
+
6. Update Team Memory
|
|
1264
|
+
7. Create the run completed event
|
|
1265
|
+
8. Calculate session metrics
|
|
1266
|
+
9. Save session to storage
|
|
1251
1267
|
"""
|
|
1252
1268
|
# Register run for cancellation tracking
|
|
1253
1269
|
register_run(run_response.run_id) # type: ignore
|
|
@@ -1286,7 +1302,6 @@ class Team:
|
|
|
1286
1302
|
videos=run_input.videos,
|
|
1287
1303
|
audio=run_input.audios,
|
|
1288
1304
|
files=run_input.files,
|
|
1289
|
-
workflow_context=workflow_context,
|
|
1290
1305
|
debug_mode=debug_mode,
|
|
1291
1306
|
add_history_to_context=add_history_to_context,
|
|
1292
1307
|
add_session_state_to_context=add_session_state_to_context,
|
|
@@ -1322,7 +1337,7 @@ class Team:
|
|
|
1322
1337
|
try:
|
|
1323
1338
|
# Start the Run by yielding a RunStarted event
|
|
1324
1339
|
if stream_intermediate_steps:
|
|
1325
|
-
yield self._handle_event(create_team_run_started_event(run_response), run_response
|
|
1340
|
+
yield self._handle_event(create_team_run_started_event(run_response), run_response)
|
|
1326
1341
|
|
|
1327
1342
|
# 3. Reason about the task(s) if reasoning is enabled
|
|
1328
1343
|
yield from self._handle_reasoning_stream(
|
|
@@ -1341,7 +1356,6 @@ class Team:
|
|
|
1341
1356
|
run_messages=run_messages,
|
|
1342
1357
|
response_format=response_format,
|
|
1343
1358
|
stream_intermediate_steps=stream_intermediate_steps,
|
|
1344
|
-
workflow_context=workflow_context,
|
|
1345
1359
|
):
|
|
1346
1360
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
1347
1361
|
yield event
|
|
@@ -1352,7 +1366,6 @@ class Team:
|
|
|
1352
1366
|
run_messages=run_messages,
|
|
1353
1367
|
response_format=response_format,
|
|
1354
1368
|
stream_intermediate_steps=stream_intermediate_steps,
|
|
1355
|
-
workflow_context=workflow_context,
|
|
1356
1369
|
):
|
|
1357
1370
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
1358
1371
|
from agno.run.team import IntermediateRunContentEvent, RunContentEvent
|
|
@@ -1371,7 +1384,6 @@ class Team:
|
|
|
1371
1384
|
run_response=run_response,
|
|
1372
1385
|
run_messages=run_messages,
|
|
1373
1386
|
stream_intermediate_steps=stream_intermediate_steps,
|
|
1374
|
-
workflow_context=workflow_context,
|
|
1375
1387
|
):
|
|
1376
1388
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
1377
1389
|
yield event
|
|
@@ -1395,26 +1407,30 @@ class Team:
|
|
|
1395
1407
|
# 5. Add the run to Team Session
|
|
1396
1408
|
session.upsert_run(run_response=run_response)
|
|
1397
1409
|
|
|
1398
|
-
# 6.
|
|
1399
|
-
self.
|
|
1410
|
+
# 6. Update Team Memory
|
|
1411
|
+
yield from self._make_memories_and_summaries(
|
|
1412
|
+
run_response=run_response,
|
|
1413
|
+
run_messages=run_messages,
|
|
1414
|
+
session=session,
|
|
1415
|
+
user_id=user_id,
|
|
1416
|
+
)
|
|
1400
1417
|
|
|
1418
|
+
# 7. Create the run completed event
|
|
1401
1419
|
completed_event = self._handle_event(
|
|
1402
1420
|
create_team_run_completed_event(
|
|
1403
1421
|
from_run_response=run_response,
|
|
1404
1422
|
),
|
|
1405
1423
|
run_response,
|
|
1406
|
-
workflow_context,
|
|
1407
1424
|
)
|
|
1408
1425
|
|
|
1409
|
-
#
|
|
1410
|
-
|
|
1411
|
-
run_response=run_response,
|
|
1412
|
-
run_messages=run_messages,
|
|
1413
|
-
session=session,
|
|
1414
|
-
user_id=user_id,
|
|
1415
|
-
)
|
|
1426
|
+
# 8. Calculate session metrics
|
|
1427
|
+
self._update_session_metrics(session=session)
|
|
1416
1428
|
|
|
1417
|
-
#
|
|
1429
|
+
# 9. Scrub the stored run based on storage flags
|
|
1430
|
+
if self._scrub_run_output_for_storage(run_response):
|
|
1431
|
+
session.upsert_run(run_response=run_response)
|
|
1432
|
+
|
|
1433
|
+
# 10. Save session to storage
|
|
1418
1434
|
self.save_session(session=session)
|
|
1419
1435
|
|
|
1420
1436
|
if stream_intermediate_steps:
|
|
@@ -1438,7 +1454,6 @@ class Team:
|
|
|
1438
1454
|
yield self._handle_event(
|
|
1439
1455
|
create_team_run_cancelled_event(from_run_response=run_response, reason=str(e)),
|
|
1440
1456
|
run_response,
|
|
1441
|
-
workflow_context,
|
|
1442
1457
|
)
|
|
1443
1458
|
|
|
1444
1459
|
# Add the RunOutput to Team Session even when cancelled
|
|
@@ -1524,6 +1539,8 @@ class Team:
|
|
|
1524
1539
|
**kwargs: Any,
|
|
1525
1540
|
) -> Union[TeamRunOutput, Iterator[Union[RunOutputEvent, TeamRunOutputEvent]]]:
|
|
1526
1541
|
"""Run the Team and return the response."""
|
|
1542
|
+
if self._has_async_db():
|
|
1543
|
+
raise Exception("run() is not supported with an async DB. Please use arun() instead.")
|
|
1527
1544
|
|
|
1528
1545
|
# Create a run_id for this specific run
|
|
1529
1546
|
run_id = str(uuid4())
|
|
@@ -1584,9 +1601,6 @@ class Team:
|
|
|
1584
1601
|
)
|
|
1585
1602
|
add_history = add_history_to_context if add_history_to_context is not None else self.add_history_to_context
|
|
1586
1603
|
|
|
1587
|
-
# Extract workflow context from kwargs if present
|
|
1588
|
-
workflow_context = kwargs.pop("workflow_context", None)
|
|
1589
|
-
|
|
1590
1604
|
# Initialize Knowledge Filters
|
|
1591
1605
|
effective_filters = knowledge_filters
|
|
1592
1606
|
|
|
@@ -1665,7 +1679,6 @@ class Team:
|
|
|
1665
1679
|
dependencies=run_dependencies,
|
|
1666
1680
|
response_format=response_format,
|
|
1667
1681
|
stream_intermediate_steps=stream_intermediate_steps,
|
|
1668
|
-
workflow_context=workflow_context,
|
|
1669
1682
|
yield_run_response=yield_run_response,
|
|
1670
1683
|
debug_mode=debug_mode,
|
|
1671
1684
|
**kwargs,
|
|
@@ -1745,47 +1758,71 @@ class Team:
|
|
|
1745
1758
|
|
|
1746
1759
|
async def _arun(
|
|
1747
1760
|
self,
|
|
1761
|
+
input: Union[str, List, Dict, Message, BaseModel],
|
|
1748
1762
|
run_response: TeamRunOutput,
|
|
1749
|
-
|
|
1750
|
-
session_state: Dict[str, Any],
|
|
1763
|
+
session_id: str,
|
|
1764
|
+
session_state: Optional[Dict[str, Any]] = None,
|
|
1765
|
+
store_member_responses: Optional[bool] = None,
|
|
1751
1766
|
user_id: Optional[str] = None,
|
|
1752
|
-
|
|
1753
|
-
add_history_to_context: Optional[bool] = None,
|
|
1767
|
+
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
|
|
1754
1768
|
add_dependencies_to_context: Optional[bool] = None,
|
|
1755
1769
|
add_session_state_to_context: Optional[bool] = None,
|
|
1770
|
+
add_history_to_context: Optional[bool] = None,
|
|
1771
|
+
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
1756
1772
|
metadata: Optional[Dict[str, Any]] = None,
|
|
1757
|
-
|
|
1758
|
-
|
|
1759
|
-
|
|
1773
|
+
audio: Optional[Sequence[Audio]] = None,
|
|
1774
|
+
images: Optional[Sequence[Image]] = None,
|
|
1775
|
+
videos: Optional[Sequence[Video]] = None,
|
|
1776
|
+
files: Optional[Sequence[File]] = None,
|
|
1760
1777
|
debug_mode: Optional[bool] = None,
|
|
1778
|
+
dependencies: Optional[Dict[str, Any]] = None,
|
|
1761
1779
|
**kwargs: Any,
|
|
1762
1780
|
) -> TeamRunOutput:
|
|
1763
1781
|
"""Run the Team and return the response.
|
|
1764
1782
|
|
|
1765
1783
|
Steps:
|
|
1766
|
-
1.
|
|
1767
|
-
2.
|
|
1768
|
-
3.
|
|
1769
|
-
4.
|
|
1770
|
-
5.
|
|
1771
|
-
6.
|
|
1772
|
-
7.
|
|
1773
|
-
8. Update
|
|
1774
|
-
9.
|
|
1784
|
+
1. Read or create session
|
|
1785
|
+
2. Update metadata and session state
|
|
1786
|
+
3. Execute pre-hooks
|
|
1787
|
+
4. Determine tools for model
|
|
1788
|
+
5. Prepare run messages
|
|
1789
|
+
6. Reason about the task if reasoning is enabled
|
|
1790
|
+
7. Get a response from the Model (includes running function calls)
|
|
1791
|
+
8. Update TeamRunOutput
|
|
1792
|
+
9. Add the run to memory
|
|
1793
|
+
10. Calculate session metrics
|
|
1794
|
+
11. Parse team response model
|
|
1795
|
+
12. Update Team Memory
|
|
1796
|
+
13. Scrub the stored run if needed
|
|
1797
|
+
14. Save session to storage
|
|
1798
|
+
15. Execute post-hooks
|
|
1775
1799
|
"""
|
|
1776
|
-
|
|
1777
|
-
|
|
1778
|
-
|
|
1800
|
+
log_debug(f"Team Run Start: {run_response.run_id}", center=True)
|
|
1801
|
+
|
|
1802
|
+
register_run(run_response.run_id) # type: ignore
|
|
1803
|
+
|
|
1804
|
+
# 1. Read or create session. Reads from the database if provided.
|
|
1805
|
+
if self._has_async_db():
|
|
1806
|
+
team_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)
|
|
1807
|
+
else:
|
|
1808
|
+
team_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
|
|
1809
|
+
|
|
1810
|
+
# 2. Update metadata and session state
|
|
1811
|
+
self._update_metadata(session=team_session)
|
|
1812
|
+
session_state = self._load_session_state(session=team_session, session_state=session_state) # type: ignore
|
|
1813
|
+
|
|
1814
|
+
if store_member_responses is None:
|
|
1815
|
+
store_member_responses = False if self.store_member_responses is None else self.store_member_responses
|
|
1779
1816
|
|
|
1780
1817
|
run_input = cast(TeamRunInput, run_response.input)
|
|
1781
|
-
|
|
1782
|
-
#
|
|
1818
|
+
|
|
1819
|
+
# 3. Execute pre-hooks after session is loaded but before processing starts
|
|
1783
1820
|
if self.pre_hooks is not None:
|
|
1784
1821
|
pre_hook_iterator = self._aexecute_pre_hooks(
|
|
1785
1822
|
hooks=self.pre_hooks, # type: ignore
|
|
1786
1823
|
run_response=run_response,
|
|
1787
1824
|
run_input=run_input,
|
|
1788
|
-
session=
|
|
1825
|
+
session=team_session,
|
|
1789
1826
|
user_id=user_id,
|
|
1790
1827
|
debug_mode=debug_mode,
|
|
1791
1828
|
**kwargs,
|
|
@@ -1795,14 +1832,14 @@ class Team:
|
|
|
1795
1832
|
async for _ in pre_hook_iterator:
|
|
1796
1833
|
pass
|
|
1797
1834
|
|
|
1798
|
-
#
|
|
1835
|
+
# 4. Determine tools for model
|
|
1799
1836
|
team_run_context: Dict[str, Any] = {}
|
|
1800
|
-
|
|
1837
|
+
self.model = cast(Model, self.model)
|
|
1801
1838
|
self.determine_tools_for_model(
|
|
1802
1839
|
model=self.model,
|
|
1803
1840
|
run_response=run_response,
|
|
1804
1841
|
team_run_context=team_run_context,
|
|
1805
|
-
session=
|
|
1842
|
+
session=team_session,
|
|
1806
1843
|
session_state=session_state,
|
|
1807
1844
|
user_id=user_id,
|
|
1808
1845
|
async_mode=True,
|
|
@@ -1812,7 +1849,6 @@ class Team:
|
|
|
1812
1849
|
videos=run_input.videos,
|
|
1813
1850
|
audio=run_input.audios,
|
|
1814
1851
|
files=run_input.files,
|
|
1815
|
-
workflow_context=workflow_context,
|
|
1816
1852
|
debug_mode=debug_mode,
|
|
1817
1853
|
add_history_to_context=add_history_to_context,
|
|
1818
1854
|
add_dependencies_to_context=add_dependencies_to_context,
|
|
@@ -1821,39 +1857,36 @@ class Team:
|
|
|
1821
1857
|
metadata=metadata,
|
|
1822
1858
|
)
|
|
1823
1859
|
|
|
1824
|
-
#
|
|
1825
|
-
run_messages = self.
|
|
1860
|
+
# 5. Prepare run messages
|
|
1861
|
+
run_messages = await self._aget_run_messages(
|
|
1826
1862
|
run_response=run_response,
|
|
1827
|
-
session=
|
|
1863
|
+
session=team_session, # type: ignore
|
|
1828
1864
|
session_state=session_state,
|
|
1829
1865
|
user_id=user_id,
|
|
1830
|
-
input_message=
|
|
1831
|
-
audio=
|
|
1832
|
-
images=
|
|
1833
|
-
videos=
|
|
1834
|
-
files=
|
|
1866
|
+
input_message=input,
|
|
1867
|
+
audio=audio,
|
|
1868
|
+
images=images,
|
|
1869
|
+
videos=videos,
|
|
1870
|
+
files=files,
|
|
1835
1871
|
knowledge_filters=knowledge_filters,
|
|
1836
1872
|
add_history_to_context=add_history_to_context,
|
|
1837
1873
|
dependencies=dependencies,
|
|
1838
1874
|
add_dependencies_to_context=add_dependencies_to_context,
|
|
1839
1875
|
add_session_state_to_context=add_session_state_to_context,
|
|
1840
|
-
metadata=metadata,
|
|
1841
1876
|
**kwargs,
|
|
1842
1877
|
)
|
|
1843
1878
|
|
|
1844
|
-
self.model = cast(Model, self.model)
|
|
1845
|
-
log_debug(f"Team Run Start: {run_response.run_id}", center=True)
|
|
1846
|
-
|
|
1847
1879
|
# Register run for cancellation tracking
|
|
1848
1880
|
register_run(run_response.run_id) # type: ignore
|
|
1849
1881
|
|
|
1850
|
-
#
|
|
1882
|
+
# 6. Reason about the task(s) if reasoning is enabled
|
|
1851
1883
|
await self._ahandle_reasoning(run_response=run_response, run_messages=run_messages)
|
|
1852
1884
|
|
|
1853
1885
|
# Check for cancellation before model call
|
|
1854
1886
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
1855
1887
|
|
|
1856
|
-
#
|
|
1888
|
+
# 7. Get the model response for the team leader
|
|
1889
|
+
self.model = cast(Model, self.model)
|
|
1857
1890
|
model_response = await self.model.aresponse(
|
|
1858
1891
|
messages=run_messages.messages,
|
|
1859
1892
|
tools=self._tools_for_model,
|
|
@@ -1863,60 +1896,65 @@ class Team:
|
|
|
1863
1896
|
response_format=response_format,
|
|
1864
1897
|
send_media_to_model=self.send_media_to_model,
|
|
1865
1898
|
) # type: ignore
|
|
1866
|
-
|
|
1867
|
-
# Check for cancellation after model call
|
|
1868
1899
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
1869
1900
|
|
|
1870
1901
|
# If an output model is provided, generate output using the output model
|
|
1871
1902
|
await self._agenerate_response_with_output_model(model_response=model_response, run_messages=run_messages)
|
|
1872
|
-
|
|
1873
1903
|
# If a parser model is provided, structure the response separately
|
|
1874
1904
|
await self._aparse_response_with_parser_model(model_response=model_response, run_messages=run_messages)
|
|
1875
1905
|
|
|
1876
|
-
#
|
|
1906
|
+
# 8. Update TeamRunOutput
|
|
1877
1907
|
self._update_run_response(model_response=model_response, run_response=run_response, run_messages=run_messages)
|
|
1878
1908
|
|
|
1909
|
+
# Optional: Store media
|
|
1879
1910
|
if self.store_media:
|
|
1880
1911
|
self._store_media(run_response, model_response)
|
|
1881
1912
|
else:
|
|
1882
1913
|
self._scrub_media_from_run_output(run_response)
|
|
1883
1914
|
|
|
1915
|
+
# 9. Add the run to memory
|
|
1916
|
+
team_session.upsert_run(run_response=run_response)
|
|
1917
|
+
|
|
1918
|
+
# 10. Calculate session metrics
|
|
1919
|
+
self._update_session_metrics(session=team_session)
|
|
1920
|
+
|
|
1884
1921
|
run_response.status = RunStatus.completed
|
|
1885
1922
|
|
|
1886
|
-
# Parse team response model
|
|
1923
|
+
# 11. Parse team response model
|
|
1887
1924
|
self._convert_response_to_structured_format(run_response=run_response)
|
|
1888
1925
|
|
|
1889
1926
|
# Set the run duration
|
|
1890
1927
|
if run_response.metrics:
|
|
1891
1928
|
run_response.metrics.stop_timer()
|
|
1892
1929
|
|
|
1893
|
-
#
|
|
1894
|
-
session.upsert_run(run_response=run_response)
|
|
1895
|
-
|
|
1896
|
-
# 6. Update Team Memory
|
|
1930
|
+
# 12. Update Team Memory
|
|
1897
1931
|
async for _ in self._amake_memories_and_summaries(
|
|
1898
1932
|
run_response=run_response,
|
|
1899
|
-
session=
|
|
1933
|
+
session=team_session,
|
|
1900
1934
|
run_messages=run_messages,
|
|
1901
1935
|
user_id=user_id,
|
|
1902
1936
|
):
|
|
1903
1937
|
pass
|
|
1904
1938
|
|
|
1905
|
-
#
|
|
1906
|
-
self.
|
|
1939
|
+
# 13. Scrub the stored run based on storage flags
|
|
1940
|
+
if self._scrub_run_output_for_storage(run_response):
|
|
1941
|
+
team_session.upsert_run(run_response=run_response)
|
|
1907
1942
|
|
|
1908
|
-
#
|
|
1909
|
-
self.
|
|
1943
|
+
# 14. Save session to storage
|
|
1944
|
+
if self._has_async_db():
|
|
1945
|
+
await self.asave_session(session=team_session)
|
|
1946
|
+
else:
|
|
1947
|
+
self.save_session(session=team_session)
|
|
1910
1948
|
|
|
1911
|
-
#
|
|
1912
|
-
await self._alog_team_telemetry(session_id=
|
|
1949
|
+
# Log Team Telemetry
|
|
1950
|
+
await self._alog_team_telemetry(session_id=team_session.session_id, run_id=run_response.run_id)
|
|
1913
1951
|
|
|
1914
|
-
# Execute post-hooks after output is generated but before response is returned
|
|
1952
|
+
# 15. Execute post-hooks after output is generated but before response is returned
|
|
1915
1953
|
if self.post_hooks is not None:
|
|
1916
1954
|
await self._aexecute_post_hooks(
|
|
1917
1955
|
hooks=self.post_hooks, # type: ignore
|
|
1918
1956
|
run_output=run_response,
|
|
1919
|
-
session=
|
|
1957
|
+
session=team_session,
|
|
1920
1958
|
user_id=user_id,
|
|
1921
1959
|
debug_mode=debug_mode,
|
|
1922
1960
|
**kwargs,
|
|
@@ -1924,48 +1962,68 @@ class Team:
|
|
|
1924
1962
|
|
|
1925
1963
|
log_debug(f"Team Run End: {run_response.run_id}", center=True, symbol="*")
|
|
1926
1964
|
|
|
1927
|
-
# Always clean up the run tracking
|
|
1928
1965
|
cleanup_run(run_response.run_id) # type: ignore
|
|
1929
1966
|
|
|
1930
1967
|
return run_response
|
|
1931
1968
|
|
|
1932
1969
|
async def _arun_stream(
|
|
1933
1970
|
self,
|
|
1971
|
+
input: Union[str, List, Dict, Message, BaseModel],
|
|
1934
1972
|
run_response: TeamRunOutput,
|
|
1935
|
-
|
|
1936
|
-
session_state: Dict[str, Any],
|
|
1973
|
+
session_id: str,
|
|
1974
|
+
session_state: Optional[Dict[str, Any]] = None,
|
|
1937
1975
|
user_id: Optional[str] = None,
|
|
1938
|
-
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
1939
|
-
add_history_to_context: Optional[bool] = None,
|
|
1940
|
-
add_dependencies_to_context: Optional[bool] = None,
|
|
1941
|
-
add_session_state_to_context: Optional[bool] = None,
|
|
1942
|
-
metadata: Optional[Dict[str, Any]] = None,
|
|
1943
1976
|
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
|
|
1944
|
-
dependencies: Optional[Dict[str, Any]] = None,
|
|
1945
1977
|
stream_intermediate_steps: bool = False,
|
|
1946
|
-
workflow_context: Optional[Dict] = None,
|
|
1947
1978
|
yield_run_response: bool = False,
|
|
1979
|
+
add_dependencies_to_context: Optional[bool] = None,
|
|
1980
|
+
add_session_state_to_context: Optional[bool] = None,
|
|
1981
|
+
add_history_to_context: Optional[bool] = None,
|
|
1982
|
+
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
1983
|
+
metadata: Optional[Dict[str, Any]] = None,
|
|
1984
|
+
audio: Optional[Sequence[Audio]] = None,
|
|
1985
|
+
images: Optional[Sequence[Image]] = None,
|
|
1986
|
+
videos: Optional[Sequence[Video]] = None,
|
|
1987
|
+
files: Optional[Sequence[File]] = None,
|
|
1948
1988
|
debug_mode: Optional[bool] = None,
|
|
1989
|
+
dependencies: Optional[Dict[str, Any]] = None,
|
|
1949
1990
|
**kwargs: Any,
|
|
1950
1991
|
) -> AsyncIterator[Union[TeamRunOutputEvent, RunOutputEvent, TeamRunOutput]]:
|
|
1951
1992
|
"""Run the Team and return the response.
|
|
1952
1993
|
|
|
1953
1994
|
Steps:
|
|
1954
1995
|
1. Resolve dependencies
|
|
1955
|
-
2.
|
|
1956
|
-
3.
|
|
1957
|
-
4.
|
|
1958
|
-
5.
|
|
1959
|
-
6.
|
|
1960
|
-
7.
|
|
1961
|
-
8.
|
|
1996
|
+
2. Read or create session
|
|
1997
|
+
3. Update metadata and session state
|
|
1998
|
+
4. Execute pre-hooks
|
|
1999
|
+
5. Determine tools for model
|
|
2000
|
+
6. Prepare run messages
|
|
2001
|
+
7. Yield the run started event
|
|
2002
|
+
8. Reason about the task(s) if reasoning is enabled
|
|
2003
|
+
9. Get a response from the model
|
|
2004
|
+
10. Add the run to memory
|
|
2005
|
+
11. Update Team Memory
|
|
2006
|
+
12. Calculate session metrics
|
|
2007
|
+
13. Create the run completed event
|
|
2008
|
+
14. Scrub the stored run if needed
|
|
2009
|
+
15. Save session to storage
|
|
1962
2010
|
"""
|
|
1963
2011
|
|
|
1964
|
-
# 1. Resolve
|
|
2012
|
+
# 1. Resolve dependencies
|
|
1965
2013
|
if dependencies is not None:
|
|
1966
|
-
|
|
2014
|
+
self._resolve_run_dependencies(dependencies=dependencies)
|
|
2015
|
+
|
|
2016
|
+
# 2. Read or create session. Reads from the database if provided.
|
|
2017
|
+
if self._has_async_db():
|
|
2018
|
+
team_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)
|
|
2019
|
+
else:
|
|
2020
|
+
team_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
|
|
2021
|
+
|
|
2022
|
+
# 3. Update metadata and session state
|
|
2023
|
+
self._update_metadata(session=team_session)
|
|
2024
|
+
session_state = self._load_session_state(session=team_session, session_state=session_state) # type: ignore
|
|
1967
2025
|
|
|
1968
|
-
# Execute pre-hooks
|
|
2026
|
+
# 4. Execute pre-hooks
|
|
1969
2027
|
run_input = cast(TeamRunInput, run_response.input)
|
|
1970
2028
|
self.model = cast(Model, self.model)
|
|
1971
2029
|
if self.pre_hooks is not None:
|
|
@@ -1973,7 +2031,7 @@ class Team:
|
|
|
1973
2031
|
hooks=self.pre_hooks, # type: ignore
|
|
1974
2032
|
run_response=run_response,
|
|
1975
2033
|
run_input=run_input,
|
|
1976
|
-
session=
|
|
2034
|
+
session=team_session,
|
|
1977
2035
|
user_id=user_id,
|
|
1978
2036
|
debug_mode=debug_mode,
|
|
1979
2037
|
**kwargs,
|
|
@@ -1981,43 +2039,40 @@ class Team:
|
|
|
1981
2039
|
async for pre_hook_event in pre_hook_iterator:
|
|
1982
2040
|
yield pre_hook_event
|
|
1983
2041
|
|
|
1984
|
-
#
|
|
2042
|
+
# 5. Determine tools for model
|
|
1985
2043
|
team_run_context: Dict[str, Any] = {}
|
|
1986
|
-
|
|
2044
|
+
self.model = cast(Model, self.model)
|
|
1987
2045
|
self.determine_tools_for_model(
|
|
1988
2046
|
model=self.model,
|
|
1989
2047
|
run_response=run_response,
|
|
1990
2048
|
team_run_context=team_run_context,
|
|
1991
|
-
session=
|
|
2049
|
+
session=team_session, # type: ignore
|
|
1992
2050
|
session_state=session_state,
|
|
1993
2051
|
user_id=user_id,
|
|
1994
2052
|
async_mode=True,
|
|
1995
2053
|
knowledge_filters=knowledge_filters,
|
|
1996
|
-
input_message=
|
|
1997
|
-
images=
|
|
1998
|
-
videos=
|
|
1999
|
-
audio=
|
|
2000
|
-
files=
|
|
2001
|
-
workflow_context=workflow_context,
|
|
2054
|
+
input_message=input,
|
|
2055
|
+
images=images,
|
|
2056
|
+
videos=videos,
|
|
2057
|
+
audio=audio,
|
|
2058
|
+
files=files,
|
|
2002
2059
|
debug_mode=debug_mode,
|
|
2003
2060
|
add_history_to_context=add_history_to_context,
|
|
2004
|
-
add_dependencies_to_context=add_dependencies_to_context,
|
|
2005
|
-
add_session_state_to_context=add_session_state_to_context,
|
|
2006
2061
|
dependencies=dependencies,
|
|
2007
2062
|
metadata=metadata,
|
|
2008
2063
|
)
|
|
2009
2064
|
|
|
2010
|
-
#
|
|
2011
|
-
run_messages = self.
|
|
2065
|
+
# 6. Prepare run messages
|
|
2066
|
+
run_messages = await self._aget_run_messages(
|
|
2012
2067
|
run_response=run_response,
|
|
2013
|
-
session=
|
|
2068
|
+
session=team_session, # type: ignore
|
|
2014
2069
|
session_state=session_state,
|
|
2015
2070
|
user_id=user_id,
|
|
2016
|
-
input_message=
|
|
2017
|
-
audio=
|
|
2018
|
-
images=
|
|
2019
|
-
videos=
|
|
2020
|
-
files=
|
|
2071
|
+
input_message=input,
|
|
2072
|
+
audio=audio,
|
|
2073
|
+
images=images,
|
|
2074
|
+
videos=videos,
|
|
2075
|
+
files=files,
|
|
2021
2076
|
knowledge_filters=knowledge_filters,
|
|
2022
2077
|
add_history_to_context=add_history_to_context,
|
|
2023
2078
|
dependencies=dependencies,
|
|
@@ -2033,13 +2088,11 @@ class Team:
|
|
|
2033
2088
|
register_run(run_response.run_id) # type: ignore
|
|
2034
2089
|
|
|
2035
2090
|
try:
|
|
2036
|
-
#
|
|
2091
|
+
# 7. Yield the run started event
|
|
2037
2092
|
if stream_intermediate_steps:
|
|
2038
|
-
yield self._handle_event(
|
|
2039
|
-
create_team_run_started_event(from_run_response=run_response), run_response, workflow_context
|
|
2040
|
-
)
|
|
2093
|
+
yield self._handle_event(create_team_run_started_event(from_run_response=run_response), run_response)
|
|
2041
2094
|
|
|
2042
|
-
#
|
|
2095
|
+
# 8. Reason about the task(s) if reasoning is enabled
|
|
2043
2096
|
async for item in self._ahandle_reasoning_stream(run_response=run_response, run_messages=run_messages):
|
|
2044
2097
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
2045
2098
|
yield item
|
|
@@ -2047,26 +2100,24 @@ class Team:
|
|
|
2047
2100
|
# Check for cancellation before model processing
|
|
2048
2101
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
2049
2102
|
|
|
2050
|
-
#
|
|
2103
|
+
# 9. Get a response from the model
|
|
2051
2104
|
if self.output_model is None:
|
|
2052
2105
|
async for event in self._ahandle_model_response_stream(
|
|
2053
|
-
session=
|
|
2106
|
+
session=team_session,
|
|
2054
2107
|
run_response=run_response,
|
|
2055
2108
|
run_messages=run_messages,
|
|
2056
2109
|
response_format=response_format,
|
|
2057
2110
|
stream_intermediate_steps=stream_intermediate_steps,
|
|
2058
|
-
workflow_context=workflow_context,
|
|
2059
2111
|
):
|
|
2060
2112
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
2061
2113
|
yield event
|
|
2062
2114
|
else:
|
|
2063
2115
|
async for event in self._ahandle_model_response_stream(
|
|
2064
|
-
session=
|
|
2116
|
+
session=team_session,
|
|
2065
2117
|
run_response=run_response,
|
|
2066
2118
|
run_messages=run_messages,
|
|
2067
2119
|
response_format=response_format,
|
|
2068
2120
|
stream_intermediate_steps=stream_intermediate_steps,
|
|
2069
|
-
workflow_context=workflow_context,
|
|
2070
2121
|
):
|
|
2071
2122
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
2072
2123
|
from agno.run.team import IntermediateRunContentEvent, RunContentEvent
|
|
@@ -2081,11 +2132,10 @@ class Team:
|
|
|
2081
2132
|
yield event
|
|
2082
2133
|
|
|
2083
2134
|
async for event in self._agenerate_response_with_output_model_stream(
|
|
2084
|
-
session=
|
|
2135
|
+
session=team_session,
|
|
2085
2136
|
run_response=run_response,
|
|
2086
2137
|
run_messages=run_messages,
|
|
2087
2138
|
stream_intermediate_steps=stream_intermediate_steps,
|
|
2088
|
-
workflow_context=workflow_context,
|
|
2089
2139
|
):
|
|
2090
2140
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
2091
2141
|
yield event
|
|
@@ -2095,37 +2145,41 @@ class Team:
|
|
|
2095
2145
|
|
|
2096
2146
|
# If a parser model is provided, structure the response separately
|
|
2097
2147
|
async for event in self._aparse_response_with_parser_model_stream(
|
|
2098
|
-
session=
|
|
2148
|
+
session=team_session, run_response=run_response, stream_intermediate_steps=stream_intermediate_steps
|
|
2099
2149
|
):
|
|
2100
2150
|
yield event
|
|
2101
2151
|
|
|
2102
2152
|
run_response.status = RunStatus.completed
|
|
2103
2153
|
|
|
2104
|
-
#
|
|
2105
|
-
|
|
2106
|
-
run_response.metrics.stop_timer()
|
|
2107
|
-
|
|
2108
|
-
# 5. Add the run to Team Session
|
|
2109
|
-
session.upsert_run(run_response=run_response)
|
|
2154
|
+
# 10. Add the run to memory
|
|
2155
|
+
team_session.upsert_run(run_response=run_response)
|
|
2110
2156
|
|
|
2111
|
-
#
|
|
2157
|
+
# 11. Update Team Memory
|
|
2112
2158
|
async for event in self._amake_memories_and_summaries(
|
|
2113
2159
|
run_response=run_response,
|
|
2114
|
-
session=
|
|
2160
|
+
session=team_session,
|
|
2115
2161
|
run_messages=run_messages,
|
|
2116
2162
|
user_id=user_id,
|
|
2117
2163
|
):
|
|
2118
2164
|
yield event
|
|
2119
2165
|
|
|
2120
|
-
#
|
|
2121
|
-
self._update_session_metrics(session=
|
|
2166
|
+
# 12. Calculate session metrics
|
|
2167
|
+
self._update_session_metrics(session=team_session)
|
|
2122
2168
|
|
|
2169
|
+
# 13. Create the run completed event
|
|
2123
2170
|
completed_event = self._handle_event(
|
|
2124
|
-
create_team_run_completed_event(from_run_response=run_response), run_response
|
|
2171
|
+
create_team_run_completed_event(from_run_response=run_response), run_response
|
|
2125
2172
|
)
|
|
2126
2173
|
|
|
2127
|
-
#
|
|
2128
|
-
self.
|
|
2174
|
+
# 14. Scrub the stored run based on storage flags
|
|
2175
|
+
if self._scrub_run_output_for_storage(run_response):
|
|
2176
|
+
team_session.upsert_run(run_response=run_response)
|
|
2177
|
+
|
|
2178
|
+
# 15. Save the session to storage
|
|
2179
|
+
if self._has_async_db():
|
|
2180
|
+
await self.asave_session(session=team_session)
|
|
2181
|
+
else:
|
|
2182
|
+
self.save_session(session=team_session)
|
|
2129
2183
|
|
|
2130
2184
|
if stream_intermediate_steps:
|
|
2131
2185
|
yield completed_event
|
|
@@ -2134,7 +2188,7 @@ class Team:
|
|
|
2134
2188
|
yield run_response
|
|
2135
2189
|
|
|
2136
2190
|
# Log Team Telemetry
|
|
2137
|
-
await self._alog_team_telemetry(session_id=
|
|
2191
|
+
await self._alog_team_telemetry(session_id=team_session.session_id, run_id=run_response.run_id)
|
|
2138
2192
|
|
|
2139
2193
|
log_debug(f"Team Run End: {run_response.run_id}", center=True, symbol="*")
|
|
2140
2194
|
|
|
@@ -2148,12 +2202,14 @@ class Team:
|
|
|
2148
2202
|
yield self._handle_event(
|
|
2149
2203
|
create_team_run_cancelled_event(from_run_response=run_response, reason=str(e)),
|
|
2150
2204
|
run_response,
|
|
2151
|
-
workflow_context,
|
|
2152
2205
|
)
|
|
2153
2206
|
|
|
2154
2207
|
# Add the RunOutput to Team Session even when cancelled
|
|
2155
|
-
|
|
2156
|
-
self.
|
|
2208
|
+
team_session.upsert_run(run_response=run_response)
|
|
2209
|
+
if self._has_async_db():
|
|
2210
|
+
await self.asave_session(session=team_session)
|
|
2211
|
+
else:
|
|
2212
|
+
self.save_session(session=team_session)
|
|
2157
2213
|
finally:
|
|
2158
2214
|
# Always clean up the run tracking
|
|
2159
2215
|
cleanup_run(run_response.run_id) # type: ignore
|
|
@@ -2260,25 +2316,8 @@ class Team:
|
|
|
2260
2316
|
images=images, videos=videos, audios=audio, files=files
|
|
2261
2317
|
)
|
|
2262
2318
|
|
|
2263
|
-
#
|
|
2264
|
-
run_input = TeamRunInput(
|
|
2265
|
-
input_content=validated_input,
|
|
2266
|
-
images=image_artifacts,
|
|
2267
|
-
videos=video_artifacts,
|
|
2268
|
-
audios=audio_artifacts,
|
|
2269
|
-
files=file_artifacts,
|
|
2270
|
-
)
|
|
2271
|
-
|
|
2272
|
-
team_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
|
|
2273
|
-
self._update_metadata(session=team_session)
|
|
2274
|
-
|
|
2275
|
-
# Update session state from DB
|
|
2276
|
-
session_state = self._load_session_state(session=team_session, session_state=session_state)
|
|
2277
|
-
|
|
2278
|
-
# Determine run dependencies (runtime override takes priority)
|
|
2319
|
+
# Resolve variables
|
|
2279
2320
|
run_dependencies = dependencies if dependencies is not None else self.dependencies
|
|
2280
|
-
|
|
2281
|
-
# Determine runtime context parameters
|
|
2282
2321
|
add_dependencies = (
|
|
2283
2322
|
add_dependencies_to_context if add_dependencies_to_context is not None else self.add_dependencies_to_context
|
|
2284
2323
|
)
|
|
@@ -2289,13 +2328,14 @@ class Team:
|
|
|
2289
2328
|
)
|
|
2290
2329
|
add_history = add_history_to_context if add_history_to_context is not None else self.add_history_to_context
|
|
2291
2330
|
|
|
2292
|
-
#
|
|
2293
|
-
|
|
2294
|
-
|
|
2295
|
-
|
|
2296
|
-
|
|
2297
|
-
|
|
2298
|
-
|
|
2331
|
+
# Create RunInput to capture the original user input
|
|
2332
|
+
run_input = TeamRunInput(
|
|
2333
|
+
input_content=validated_input,
|
|
2334
|
+
images=image_artifacts,
|
|
2335
|
+
videos=video_artifacts,
|
|
2336
|
+
audios=audio_artifacts,
|
|
2337
|
+
files=files,
|
|
2338
|
+
)
|
|
2299
2339
|
|
|
2300
2340
|
# Use stream override value when necessary
|
|
2301
2341
|
if stream is None:
|
|
@@ -2326,6 +2366,11 @@ class Team:
|
|
|
2326
2366
|
else:
|
|
2327
2367
|
metadata = self.metadata
|
|
2328
2368
|
|
|
2369
|
+
# Get knowledge filters
|
|
2370
|
+
effective_filters = knowledge_filters
|
|
2371
|
+
if self.knowledge_filters or knowledge_filters:
|
|
2372
|
+
effective_filters = self._get_effective_filters(knowledge_filters)
|
|
2373
|
+
|
|
2329
2374
|
# Create a new run_response for this attempt
|
|
2330
2375
|
run_response = TeamRunOutput(
|
|
2331
2376
|
run_id=run_id,
|
|
@@ -2355,8 +2400,9 @@ class Team:
|
|
|
2355
2400
|
try:
|
|
2356
2401
|
if stream:
|
|
2357
2402
|
response_iterator = self._arun_stream(
|
|
2403
|
+
input=validated_input,
|
|
2358
2404
|
run_response=run_response,
|
|
2359
|
-
|
|
2405
|
+
session_id=session_id,
|
|
2360
2406
|
session_state=session_state,
|
|
2361
2407
|
user_id=user_id,
|
|
2362
2408
|
knowledge_filters=effective_filters,
|
|
@@ -2367,7 +2413,6 @@ class Team:
|
|
|
2367
2413
|
response_format=response_format,
|
|
2368
2414
|
dependencies=run_dependencies,
|
|
2369
2415
|
stream_intermediate_steps=stream_intermediate_steps,
|
|
2370
|
-
workflow_context=workflow_context,
|
|
2371
2416
|
yield_run_response=yield_run_response,
|
|
2372
2417
|
debug_mode=debug_mode,
|
|
2373
2418
|
**kwargs,
|
|
@@ -2375,19 +2420,23 @@ class Team:
|
|
|
2375
2420
|
return response_iterator # type: ignore
|
|
2376
2421
|
else:
|
|
2377
2422
|
return self._arun( # type: ignore
|
|
2423
|
+
input=validated_input,
|
|
2378
2424
|
run_response=run_response,
|
|
2379
|
-
|
|
2380
|
-
user_id=user_id,
|
|
2425
|
+
session_id=session_id,
|
|
2381
2426
|
session_state=session_state,
|
|
2427
|
+
user_id=user_id,
|
|
2428
|
+
audio=audio,
|
|
2429
|
+
images=images,
|
|
2430
|
+
videos=videos,
|
|
2431
|
+
files=files,
|
|
2382
2432
|
knowledge_filters=effective_filters,
|
|
2383
2433
|
add_history_to_context=add_history,
|
|
2384
2434
|
add_dependencies_to_context=add_dependencies,
|
|
2385
2435
|
add_session_state_to_context=add_session_state,
|
|
2386
2436
|
metadata=metadata,
|
|
2387
2437
|
response_format=response_format,
|
|
2388
|
-
dependencies=run_dependencies,
|
|
2389
|
-
workflow_context=workflow_context,
|
|
2390
2438
|
debug_mode=debug_mode,
|
|
2439
|
+
dependencies=run_dependencies,
|
|
2391
2440
|
**kwargs,
|
|
2392
2441
|
)
|
|
2393
2442
|
|
|
@@ -2405,17 +2454,6 @@ class Team:
|
|
|
2405
2454
|
import time
|
|
2406
2455
|
|
|
2407
2456
|
time.sleep(delay)
|
|
2408
|
-
except RunCancelledException as e:
|
|
2409
|
-
# Handle run cancellation
|
|
2410
|
-
log_info(f"Team run {run_response.run_id} was cancelled")
|
|
2411
|
-
run_response.content = str(e)
|
|
2412
|
-
run_response.status = RunStatus.cancelled
|
|
2413
|
-
|
|
2414
|
-
# Add the RunOutput to Team Session even when cancelled
|
|
2415
|
-
team_session.upsert_run(run_response=run_response)
|
|
2416
|
-
self.save_session(session=team_session)
|
|
2417
|
-
|
|
2418
|
-
return run_response
|
|
2419
2457
|
except KeyboardInterrupt:
|
|
2420
2458
|
run_response.content = "Operation cancelled by user"
|
|
2421
2459
|
run_response.status = RunStatus.cancelled
|
|
@@ -2526,7 +2564,6 @@ class Team:
|
|
|
2526
2564
|
run_messages: RunMessages,
|
|
2527
2565
|
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
|
|
2528
2566
|
stream_intermediate_steps: bool = False,
|
|
2529
|
-
workflow_context: Optional[Dict] = None,
|
|
2530
2567
|
) -> Iterator[Union[TeamRunOutputEvent, RunOutputEvent]]:
|
|
2531
2568
|
self.model = cast(Model, self.model)
|
|
2532
2569
|
|
|
@@ -2559,7 +2596,6 @@ class Team:
|
|
|
2559
2596
|
reasoning_state=reasoning_state,
|
|
2560
2597
|
stream_intermediate_steps=stream_intermediate_steps,
|
|
2561
2598
|
parse_structured_output=self.should_parse_structured_output,
|
|
2562
|
-
workflow_context=workflow_context,
|
|
2563
2599
|
)
|
|
2564
2600
|
|
|
2565
2601
|
# 3. Update TeamRunOutput
|
|
@@ -2608,7 +2644,6 @@ class Team:
|
|
|
2608
2644
|
run_messages: RunMessages,
|
|
2609
2645
|
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
|
|
2610
2646
|
stream_intermediate_steps: bool = False,
|
|
2611
|
-
workflow_context: Optional[Dict] = None,
|
|
2612
2647
|
) -> AsyncIterator[Union[TeamRunOutputEvent, RunOutputEvent]]:
|
|
2613
2648
|
self.model = cast(Model, self.model)
|
|
2614
2649
|
|
|
@@ -2642,7 +2677,6 @@ class Team:
|
|
|
2642
2677
|
reasoning_state=reasoning_state,
|
|
2643
2678
|
stream_intermediate_steps=stream_intermediate_steps,
|
|
2644
2679
|
parse_structured_output=self.should_parse_structured_output,
|
|
2645
|
-
workflow_context=workflow_context,
|
|
2646
2680
|
):
|
|
2647
2681
|
yield event
|
|
2648
2682
|
|
|
@@ -2695,7 +2729,6 @@ class Team:
|
|
|
2695
2729
|
reasoning_state: Optional[Dict[str, Any]] = None,
|
|
2696
2730
|
stream_intermediate_steps: bool = False,
|
|
2697
2731
|
parse_structured_output: bool = False,
|
|
2698
|
-
workflow_context: Optional[Dict] = None,
|
|
2699
2732
|
) -> Iterator[Union[TeamRunOutputEvent, RunOutputEvent]]:
|
|
2700
2733
|
if isinstance(model_response_event, tuple(get_args(RunOutputEvent))) or isinstance(
|
|
2701
2734
|
model_response_event, tuple(get_args(TeamRunOutputEvent))
|
|
@@ -2829,7 +2862,6 @@ class Team:
|
|
|
2829
2862
|
image=model_response_event.images[-1] if model_response_event.images else None,
|
|
2830
2863
|
),
|
|
2831
2864
|
run_response,
|
|
2832
|
-
workflow_context=workflow_context,
|
|
2833
2865
|
)
|
|
2834
2866
|
else:
|
|
2835
2867
|
yield self._handle_event(
|
|
@@ -2839,7 +2871,6 @@ class Team:
|
|
|
2839
2871
|
content_type=content_type,
|
|
2840
2872
|
),
|
|
2841
2873
|
run_response,
|
|
2842
|
-
workflow_context=workflow_context,
|
|
2843
2874
|
)
|
|
2844
2875
|
|
|
2845
2876
|
# If the model response is a tool_call_started, add the tool call to the run_response
|
|
@@ -3052,7 +3083,9 @@ class Team:
|
|
|
3052
3083
|
run_messages.user_message.get_content_string() if run_messages.user_message is not None else None
|
|
3053
3084
|
)
|
|
3054
3085
|
if user_message_str is not None and self.memory_manager is not None and not self.enable_agentic_memory:
|
|
3055
|
-
tasks.append(
|
|
3086
|
+
tasks.append(
|
|
3087
|
+
self.memory_manager.acreate_user_memories(message=user_message_str, user_id=user_id, team_id=self.id)
|
|
3088
|
+
)
|
|
3056
3089
|
|
|
3057
3090
|
if self.session_summary_manager is not None:
|
|
3058
3091
|
tasks.append(self.session_summary_manager.acreate_session_summary(session=session))
|
|
@@ -3285,7 +3318,6 @@ class Team:
|
|
|
3285
3318
|
run_response: TeamRunOutput,
|
|
3286
3319
|
run_messages: RunMessages,
|
|
3287
3320
|
stream_intermediate_steps: bool = False,
|
|
3288
|
-
workflow_context: Optional[Dict] = None,
|
|
3289
3321
|
):
|
|
3290
3322
|
"""Parse the model response using the output model stream."""
|
|
3291
3323
|
from agno.utils.events import (
|
|
@@ -3308,7 +3340,6 @@ class Team:
|
|
|
3308
3340
|
run_response=run_response,
|
|
3309
3341
|
full_model_response=model_response,
|
|
3310
3342
|
model_response_event=model_response_event,
|
|
3311
|
-
workflow_context=workflow_context,
|
|
3312
3343
|
)
|
|
3313
3344
|
|
|
3314
3345
|
# Update the TeamRunResponse content
|
|
@@ -3341,7 +3372,6 @@ class Team:
|
|
|
3341
3372
|
run_response: TeamRunOutput,
|
|
3342
3373
|
run_messages: RunMessages,
|
|
3343
3374
|
stream_intermediate_steps: bool = False,
|
|
3344
|
-
workflow_context: Optional[Dict] = None,
|
|
3345
3375
|
):
|
|
3346
3376
|
"""Parse the model response using the output model stream."""
|
|
3347
3377
|
from agno.utils.events import (
|
|
@@ -3364,7 +3394,6 @@ class Team:
|
|
|
3364
3394
|
run_response=run_response,
|
|
3365
3395
|
full_model_response=model_response,
|
|
3366
3396
|
model_response_event=model_response_event,
|
|
3367
|
-
workflow_context=workflow_context,
|
|
3368
3397
|
):
|
|
3369
3398
|
yield event
|
|
3370
3399
|
|
|
@@ -3385,15 +3414,7 @@ class Team:
|
|
|
3385
3414
|
self,
|
|
3386
3415
|
event: Union[RunOutputEvent, TeamRunOutputEvent],
|
|
3387
3416
|
run_response: TeamRunOutput,
|
|
3388
|
-
workflow_context: Optional[Dict] = None,
|
|
3389
3417
|
):
|
|
3390
|
-
if workflow_context:
|
|
3391
|
-
event.workflow_id = workflow_context.get("workflow_id")
|
|
3392
|
-
event.workflow_run_id = workflow_context.get("workflow_run_id")
|
|
3393
|
-
event.step_id = workflow_context.get("step_id")
|
|
3394
|
-
event.step_name = workflow_context.get("step_name")
|
|
3395
|
-
event.step_index = workflow_context.get("step_index")
|
|
3396
|
-
|
|
3397
3418
|
# We only store events that are not run_response_content events
|
|
3398
3419
|
events_to_skip = [event.value for event in self.events_to_skip] if self.events_to_skip else []
|
|
3399
3420
|
if self.store_events and event.event not in events_to_skip:
|
|
@@ -3434,6 +3455,11 @@ class Team:
|
|
|
3434
3455
|
tags_to_include_in_markdown: Optional[Set[str]] = None,
|
|
3435
3456
|
**kwargs: Any,
|
|
3436
3457
|
) -> None:
|
|
3458
|
+
if self._has_async_db():
|
|
3459
|
+
raise Exception(
|
|
3460
|
+
"This method is not supported with an async DB. Please use the async version of this method."
|
|
3461
|
+
)
|
|
3462
|
+
|
|
3437
3463
|
if not tags_to_include_in_markdown:
|
|
3438
3464
|
tags_to_include_in_markdown = {"think", "thinking"}
|
|
3439
3465
|
|
|
@@ -3652,6 +3678,53 @@ class Team:
|
|
|
3652
3678
|
message.image_output = None
|
|
3653
3679
|
message.video_output = None
|
|
3654
3680
|
|
|
3681
|
+
def _scrub_tool_results_from_run_output(self, run_response: TeamRunOutput) -> None:
|
|
3682
|
+
"""
|
|
3683
|
+
Remove all tool-related data from TeamRunOutput when store_tool_results=False.
|
|
3684
|
+
This includes tool calls, tool results, and tool-related message fields.
|
|
3685
|
+
"""
|
|
3686
|
+
# Remove tool results (messages with role="tool")
|
|
3687
|
+
if run_response.messages:
|
|
3688
|
+
run_response.messages = [msg for msg in run_response.messages if msg.role != "tool"]
|
|
3689
|
+
# Also scrub tool-related fields from remaining messages
|
|
3690
|
+
for message in run_response.messages:
|
|
3691
|
+
self._scrub_tool_data_from_message(message)
|
|
3692
|
+
|
|
3693
|
+
def _scrub_tool_data_from_message(self, message: Message) -> None:
|
|
3694
|
+
"""Remove tool-related data from a Message object."""
|
|
3695
|
+
message.tool_calls = None
|
|
3696
|
+
message.tool_call_id = None
|
|
3697
|
+
|
|
3698
|
+
def _scrub_history_messages_from_run_output(self, run_response: TeamRunOutput) -> None:
|
|
3699
|
+
"""
|
|
3700
|
+
Remove all history messages from TeamRunOutput when store_history_messages=False.
|
|
3701
|
+
This removes messages that were loaded from the team's memory.
|
|
3702
|
+
"""
|
|
3703
|
+
# Remove messages with from_history=True
|
|
3704
|
+
if run_response.messages:
|
|
3705
|
+
run_response.messages = [msg for msg in run_response.messages if not msg.from_history]
|
|
3706
|
+
|
|
3707
|
+
def _scrub_run_output_for_storage(self, run_response: TeamRunOutput) -> bool:
|
|
3708
|
+
"""
|
|
3709
|
+
Scrub run output based on storage flags before persisting to database.
|
|
3710
|
+
Returns True if any scrubbing was done, False otherwise.
|
|
3711
|
+
"""
|
|
3712
|
+
scrubbed = False
|
|
3713
|
+
|
|
3714
|
+
if not self.store_media:
|
|
3715
|
+
self._scrub_media_from_run_output(run_response)
|
|
3716
|
+
scrubbed = True
|
|
3717
|
+
|
|
3718
|
+
if not self.store_tool_results:
|
|
3719
|
+
self._scrub_tool_results_from_run_output(run_response)
|
|
3720
|
+
scrubbed = True
|
|
3721
|
+
|
|
3722
|
+
if not self.store_history_messages:
|
|
3723
|
+
self._scrub_history_messages_from_run_output(run_response)
|
|
3724
|
+
scrubbed = True
|
|
3725
|
+
|
|
3726
|
+
return scrubbed
|
|
3727
|
+
|
|
3655
3728
|
def _validate_media_object_id(
|
|
3656
3729
|
self,
|
|
3657
3730
|
images: Optional[Sequence[Image]] = None,
|
|
@@ -3895,12 +3968,15 @@ class Team:
|
|
|
3895
3968
|
|
|
3896
3969
|
# If a reasoning model is provided, use it to generate reasoning
|
|
3897
3970
|
if reasoning_model_provided:
|
|
3971
|
+
from agno.reasoning.anthropic import is_anthropic_reasoning_model
|
|
3898
3972
|
from agno.reasoning.azure_ai_foundry import is_ai_foundry_reasoning_model
|
|
3899
3973
|
from agno.reasoning.deepseek import is_deepseek_reasoning_model
|
|
3974
|
+
from agno.reasoning.gemini import is_gemini_reasoning_model
|
|
3900
3975
|
from agno.reasoning.groq import is_groq_reasoning_model
|
|
3901
3976
|
from agno.reasoning.helpers import get_reasoning_agent
|
|
3902
3977
|
from agno.reasoning.ollama import is_ollama_reasoning_model
|
|
3903
3978
|
from agno.reasoning.openai import is_openai_reasoning_model
|
|
3979
|
+
from agno.reasoning.vertexai import is_vertexai_reasoning_model
|
|
3904
3980
|
|
|
3905
3981
|
reasoning_agent = self.reasoning_agent or get_reasoning_agent(
|
|
3906
3982
|
reasoning_model=reasoning_model,
|
|
@@ -3913,8 +3989,20 @@ class Team:
|
|
|
3913
3989
|
is_openai = is_openai_reasoning_model(reasoning_model)
|
|
3914
3990
|
is_ollama = is_ollama_reasoning_model(reasoning_model)
|
|
3915
3991
|
is_ai_foundry = is_ai_foundry_reasoning_model(reasoning_model)
|
|
3992
|
+
is_gemini = is_gemini_reasoning_model(reasoning_model)
|
|
3993
|
+
is_anthropic = is_anthropic_reasoning_model(reasoning_model)
|
|
3994
|
+
is_vertexai = is_vertexai_reasoning_model(reasoning_model)
|
|
3916
3995
|
|
|
3917
|
-
if
|
|
3996
|
+
if (
|
|
3997
|
+
is_deepseek
|
|
3998
|
+
or is_groq
|
|
3999
|
+
or is_openai
|
|
4000
|
+
or is_ollama
|
|
4001
|
+
or is_ai_foundry
|
|
4002
|
+
or is_gemini
|
|
4003
|
+
or is_anthropic
|
|
4004
|
+
or is_vertexai
|
|
4005
|
+
):
|
|
3918
4006
|
reasoning_message: Optional[Message] = None
|
|
3919
4007
|
if is_deepseek:
|
|
3920
4008
|
from agno.reasoning.deepseek import get_deepseek_reasoning
|
|
@@ -3951,6 +4039,27 @@ class Team:
|
|
|
3951
4039
|
reasoning_message = get_ai_foundry_reasoning(
|
|
3952
4040
|
reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
|
|
3953
4041
|
)
|
|
4042
|
+
elif is_gemini:
|
|
4043
|
+
from agno.reasoning.gemini import get_gemini_reasoning
|
|
4044
|
+
|
|
4045
|
+
log_debug("Starting Gemini Reasoning", center=True, symbol="=")
|
|
4046
|
+
reasoning_message = get_gemini_reasoning(
|
|
4047
|
+
reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
|
|
4048
|
+
)
|
|
4049
|
+
elif is_anthropic:
|
|
4050
|
+
from agno.reasoning.anthropic import get_anthropic_reasoning
|
|
4051
|
+
|
|
4052
|
+
log_debug("Starting Anthropic Claude Reasoning", center=True, symbol="=")
|
|
4053
|
+
reasoning_message = get_anthropic_reasoning(
|
|
4054
|
+
reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
|
|
4055
|
+
)
|
|
4056
|
+
elif is_vertexai:
|
|
4057
|
+
from agno.reasoning.vertexai import get_vertexai_reasoning
|
|
4058
|
+
|
|
4059
|
+
log_debug("Starting VertexAI Reasoning", center=True, symbol="=")
|
|
4060
|
+
reasoning_message = get_vertexai_reasoning(
|
|
4061
|
+
reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
|
|
4062
|
+
)
|
|
3954
4063
|
|
|
3955
4064
|
if reasoning_message is None:
|
|
3956
4065
|
log_warning("Reasoning error. Reasoning response is None, continuing regular session...")
|
|
@@ -4129,12 +4238,15 @@ class Team:
|
|
|
4129
4238
|
|
|
4130
4239
|
# If a reasoning model is provided, use it to generate reasoning
|
|
4131
4240
|
if reasoning_model_provided:
|
|
4241
|
+
from agno.reasoning.anthropic import is_anthropic_reasoning_model
|
|
4132
4242
|
from agno.reasoning.azure_ai_foundry import is_ai_foundry_reasoning_model
|
|
4133
4243
|
from agno.reasoning.deepseek import is_deepseek_reasoning_model
|
|
4244
|
+
from agno.reasoning.gemini import is_gemini_reasoning_model
|
|
4134
4245
|
from agno.reasoning.groq import is_groq_reasoning_model
|
|
4135
4246
|
from agno.reasoning.helpers import get_reasoning_agent
|
|
4136
4247
|
from agno.reasoning.ollama import is_ollama_reasoning_model
|
|
4137
4248
|
from agno.reasoning.openai import is_openai_reasoning_model
|
|
4249
|
+
from agno.reasoning.vertexai import is_vertexai_reasoning_model
|
|
4138
4250
|
|
|
4139
4251
|
reasoning_agent = self.reasoning_agent or get_reasoning_agent(
|
|
4140
4252
|
reasoning_model=reasoning_model,
|
|
@@ -4147,8 +4259,20 @@ class Team:
|
|
|
4147
4259
|
is_openai = is_openai_reasoning_model(reasoning_model)
|
|
4148
4260
|
is_ollama = is_ollama_reasoning_model(reasoning_model)
|
|
4149
4261
|
is_ai_foundry = is_ai_foundry_reasoning_model(reasoning_model)
|
|
4262
|
+
is_gemini = is_gemini_reasoning_model(reasoning_model)
|
|
4263
|
+
is_anthropic = is_anthropic_reasoning_model(reasoning_model)
|
|
4264
|
+
is_vertexai = is_vertexai_reasoning_model(reasoning_model)
|
|
4150
4265
|
|
|
4151
|
-
if
|
|
4266
|
+
if (
|
|
4267
|
+
is_deepseek
|
|
4268
|
+
or is_groq
|
|
4269
|
+
or is_openai
|
|
4270
|
+
or is_ollama
|
|
4271
|
+
or is_ai_foundry
|
|
4272
|
+
or is_gemini
|
|
4273
|
+
or is_anthropic
|
|
4274
|
+
or is_vertexai
|
|
4275
|
+
):
|
|
4152
4276
|
reasoning_message: Optional[Message] = None
|
|
4153
4277
|
if is_deepseek:
|
|
4154
4278
|
from agno.reasoning.deepseek import aget_deepseek_reasoning
|
|
@@ -4185,10 +4309,31 @@ class Team:
|
|
|
4185
4309
|
reasoning_message = get_ai_foundry_reasoning(
|
|
4186
4310
|
reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
|
|
4187
4311
|
)
|
|
4312
|
+
elif is_gemini:
|
|
4313
|
+
from agno.reasoning.gemini import aget_gemini_reasoning
|
|
4188
4314
|
|
|
4189
|
-
|
|
4190
|
-
|
|
4191
|
-
|
|
4315
|
+
log_debug("Starting Gemini Reasoning", center=True, symbol="=")
|
|
4316
|
+
reasoning_message = await aget_gemini_reasoning(
|
|
4317
|
+
reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
|
|
4318
|
+
)
|
|
4319
|
+
elif is_anthropic:
|
|
4320
|
+
from agno.reasoning.anthropic import aget_anthropic_reasoning
|
|
4321
|
+
|
|
4322
|
+
log_debug("Starting Anthropic Claude Reasoning", center=True, symbol="=")
|
|
4323
|
+
reasoning_message = await aget_anthropic_reasoning(
|
|
4324
|
+
reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
|
|
4325
|
+
)
|
|
4326
|
+
elif is_vertexai:
|
|
4327
|
+
from agno.reasoning.vertexai import aget_vertexai_reasoning
|
|
4328
|
+
|
|
4329
|
+
log_debug("Starting VertexAI Reasoning", center=True, symbol="=")
|
|
4330
|
+
reasoning_message = await aget_vertexai_reasoning(
|
|
4331
|
+
reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
|
|
4332
|
+
)
|
|
4333
|
+
|
|
4334
|
+
if reasoning_message is None:
|
|
4335
|
+
log_warning("Reasoning error. Reasoning response is None, continuing regular session...")
|
|
4336
|
+
return
|
|
4192
4337
|
run_messages.messages.append(reasoning_message)
|
|
4193
4338
|
# Add reasoning step to the Agent's run_response
|
|
4194
4339
|
update_run_output_with_reasoning(
|
|
@@ -4529,7 +4674,6 @@ class Team:
|
|
|
4529
4674
|
videos: Optional[Sequence[Video]] = None,
|
|
4530
4675
|
audio: Optional[Sequence[Audio]] = None,
|
|
4531
4676
|
files: Optional[Sequence[File]] = None,
|
|
4532
|
-
workflow_context: Optional[Dict] = None,
|
|
4533
4677
|
debug_mode: Optional[bool] = None,
|
|
4534
4678
|
add_history_to_context: Optional[bool] = None,
|
|
4535
4679
|
dependencies: Optional[Dict[str, Any]] = None,
|
|
@@ -4623,7 +4767,6 @@ class Team:
|
|
|
4623
4767
|
files=files, # type: ignore
|
|
4624
4768
|
knowledge_filters=knowledge_filters,
|
|
4625
4769
|
add_history_to_context=add_history_to_context,
|
|
4626
|
-
workflow_context=workflow_context,
|
|
4627
4770
|
dependencies=dependencies,
|
|
4628
4771
|
add_dependencies_to_context=add_dependencies_to_context,
|
|
4629
4772
|
add_session_state_to_context=add_session_state_to_context,
|
|
@@ -4756,7 +4899,7 @@ class Team:
|
|
|
4756
4899
|
system_message_content += f"{indent * ' '} - Name: {member.name}\n"
|
|
4757
4900
|
if member.role is not None:
|
|
4758
4901
|
system_message_content += f"{indent * ' '} - Role: {member.role}\n"
|
|
4759
|
-
if member.tools and self.add_member_tools_to_context:
|
|
4902
|
+
if member.tools is not None and member.tools != [] and self.add_member_tools_to_context:
|
|
4760
4903
|
system_message_content += f"{indent * ' '} - Member tools:\n"
|
|
4761
4904
|
for _tool in member.tools:
|
|
4762
4905
|
if isinstance(_tool, Toolkit):
|
|
@@ -4960,7 +5103,308 @@ class Team:
|
|
|
4960
5103
|
if self.memory_manager is None:
|
|
4961
5104
|
self._set_memory_manager()
|
|
4962
5105
|
_memory_manager_not_set = True
|
|
4963
|
-
user_memories = self.memory_manager.get_user_memories(user_id=user_id) # type: ignore
|
|
5106
|
+
user_memories = self.memory_manager.get_user_memories(user_id=user_id) # type: ignore
|
|
5107
|
+
if user_memories and len(user_memories) > 0:
|
|
5108
|
+
system_message_content += (
|
|
5109
|
+
"You have access to memories from previous interactions with the user that you can use:\n\n"
|
|
5110
|
+
)
|
|
5111
|
+
system_message_content += "<memories_from_previous_interactions>"
|
|
5112
|
+
for _memory in user_memories: # type: ignore
|
|
5113
|
+
system_message_content += f"\n- {_memory.memory}"
|
|
5114
|
+
system_message_content += "\n</memories_from_previous_interactions>\n\n"
|
|
5115
|
+
system_message_content += (
|
|
5116
|
+
"Note: this information is from previous interactions and may be updated in this conversation. "
|
|
5117
|
+
"You should always prefer information from this conversation over the past memories.\n"
|
|
5118
|
+
)
|
|
5119
|
+
else:
|
|
5120
|
+
system_message_content += (
|
|
5121
|
+
"You have the capability to retain memories from previous interactions with the user, "
|
|
5122
|
+
"but have not had any interactions with the user yet.\n"
|
|
5123
|
+
)
|
|
5124
|
+
if _memory_manager_not_set:
|
|
5125
|
+
self.memory_manager = None
|
|
5126
|
+
|
|
5127
|
+
if self.enable_agentic_memory:
|
|
5128
|
+
system_message_content += (
|
|
5129
|
+
"\n<updating_user_memories>\n"
|
|
5130
|
+
"- You have access to the `update_user_memory` tool that you can use to add new memories, update existing memories, delete memories, or clear all memories.\n"
|
|
5131
|
+
"- If the user's message includes information that should be captured as a memory, use the `update_user_memory` tool to update your memory database.\n"
|
|
5132
|
+
"- Memories should include details that could personalize ongoing interactions with the user.\n"
|
|
5133
|
+
"- Use this tool to add new memories or update existing memories that you identify in the conversation.\n"
|
|
5134
|
+
"- Use this tool if the user asks to update their memory, delete a memory, or clear all memories.\n"
|
|
5135
|
+
"- If you use the `update_user_memory` tool, remember to pass on the response to the user.\n"
|
|
5136
|
+
"</updating_user_memories>\n\n"
|
|
5137
|
+
)
|
|
5138
|
+
|
|
5139
|
+
# Then add a summary of the interaction to the system prompt
|
|
5140
|
+
if self.add_session_summary_to_context and session.summary is not None:
|
|
5141
|
+
system_message_content += "Here is a brief summary of your previous interactions:\n\n"
|
|
5142
|
+
system_message_content += "<summary_of_previous_interactions>\n"
|
|
5143
|
+
system_message_content += session.summary.summary
|
|
5144
|
+
system_message_content += "\n</summary_of_previous_interactions>\n\n"
|
|
5145
|
+
system_message_content += (
|
|
5146
|
+
"Note: this information is from previous interactions and may be outdated. "
|
|
5147
|
+
"You should ALWAYS prefer information from this conversation over the past summary.\n\n"
|
|
5148
|
+
)
|
|
5149
|
+
|
|
5150
|
+
if self.description is not None:
|
|
5151
|
+
system_message_content += f"<description>\n{self.description}\n</description>\n\n"
|
|
5152
|
+
|
|
5153
|
+
if self.role is not None:
|
|
5154
|
+
system_message_content += f"\n<your_role>\n{self.role}\n</your_role>\n\n"
|
|
5155
|
+
|
|
5156
|
+
# 3.3.5 Then add instructions for the Agent
|
|
5157
|
+
if len(instructions) > 0:
|
|
5158
|
+
system_message_content += "<instructions>"
|
|
5159
|
+
if len(instructions) > 1:
|
|
5160
|
+
for _upi in instructions:
|
|
5161
|
+
system_message_content += f"\n- {_upi}"
|
|
5162
|
+
else:
|
|
5163
|
+
system_message_content += "\n" + instructions[0]
|
|
5164
|
+
system_message_content += "\n</instructions>\n\n"
|
|
5165
|
+
# 3.3.6 Add additional information
|
|
5166
|
+
if len(additional_information) > 0:
|
|
5167
|
+
system_message_content += "<additional_information>"
|
|
5168
|
+
for _ai in additional_information:
|
|
5169
|
+
system_message_content += f"\n- {_ai}"
|
|
5170
|
+
system_message_content += "\n</additional_information>\n\n"
|
|
5171
|
+
# 3.3.7 Then add instructions for the tools
|
|
5172
|
+
if self._tool_instructions is not None:
|
|
5173
|
+
for _ti in self._tool_instructions:
|
|
5174
|
+
system_message_content += f"{_ti}\n"
|
|
5175
|
+
|
|
5176
|
+
# Format the system message with the session state variables
|
|
5177
|
+
if self.resolve_in_context:
|
|
5178
|
+
system_message_content = self._format_message_with_state_variables(
|
|
5179
|
+
system_message_content,
|
|
5180
|
+
user_id=user_id,
|
|
5181
|
+
session_state=session_state,
|
|
5182
|
+
dependencies=dependencies,
|
|
5183
|
+
metadata=metadata,
|
|
5184
|
+
)
|
|
5185
|
+
|
|
5186
|
+
system_message_from_model = self.model.get_system_message_for_model(self._tools_for_model)
|
|
5187
|
+
if system_message_from_model is not None:
|
|
5188
|
+
system_message_content += system_message_from_model
|
|
5189
|
+
|
|
5190
|
+
if self.expected_output is not None:
|
|
5191
|
+
system_message_content += f"<expected_output>\n{self.expected_output.strip()}\n</expected_output>\n\n"
|
|
5192
|
+
|
|
5193
|
+
if self.additional_context is not None:
|
|
5194
|
+
system_message_content += (
|
|
5195
|
+
f"<additional_context>\n{self.additional_context.strip()}\n</additional_context>\n\n"
|
|
5196
|
+
)
|
|
5197
|
+
|
|
5198
|
+
if add_session_state_to_context and session_state is not None:
|
|
5199
|
+
system_message_content += self._get_formatted_session_state_for_system_message(session_state)
|
|
5200
|
+
|
|
5201
|
+
# Add the JSON output prompt if output_schema is provided and structured_outputs is False
|
|
5202
|
+
if (
|
|
5203
|
+
self.output_schema is not None
|
|
5204
|
+
and self.use_json_mode
|
|
5205
|
+
and self.model
|
|
5206
|
+
and self.model.supports_native_structured_outputs
|
|
5207
|
+
):
|
|
5208
|
+
system_message_content += f"{self._get_json_output_prompt()}"
|
|
5209
|
+
|
|
5210
|
+
return Message(role=self.system_message_role, content=system_message_content.strip())
|
|
5211
|
+
|
|
5212
|
+
async def aget_system_message(
|
|
5213
|
+
self,
|
|
5214
|
+
session: TeamSession,
|
|
5215
|
+
session_state: Optional[Dict[str, Any]] = None,
|
|
5216
|
+
user_id: Optional[str] = None,
|
|
5217
|
+
audio: Optional[Sequence[Audio]] = None,
|
|
5218
|
+
images: Optional[Sequence[Image]] = None,
|
|
5219
|
+
videos: Optional[Sequence[Video]] = None,
|
|
5220
|
+
files: Optional[Sequence[File]] = None,
|
|
5221
|
+
dependencies: Optional[Dict[str, Any]] = None,
|
|
5222
|
+
metadata: Optional[Dict[str, Any]] = None,
|
|
5223
|
+
add_session_state_to_context: Optional[bool] = None,
|
|
5224
|
+
) -> Optional[Message]:
|
|
5225
|
+
"""Get the system message for the team."""
|
|
5226
|
+
|
|
5227
|
+
# 1. If the system_message is provided, use that.
|
|
5228
|
+
if self.system_message is not None:
|
|
5229
|
+
if isinstance(self.system_message, Message):
|
|
5230
|
+
return self.system_message
|
|
5231
|
+
|
|
5232
|
+
sys_message_content: str = ""
|
|
5233
|
+
if isinstance(self.system_message, str):
|
|
5234
|
+
sys_message_content = self.system_message
|
|
5235
|
+
elif callable(self.system_message):
|
|
5236
|
+
sys_message_content = self.system_message(agent=self)
|
|
5237
|
+
if not isinstance(sys_message_content, str):
|
|
5238
|
+
raise Exception("system_message must return a string")
|
|
5239
|
+
|
|
5240
|
+
# Format the system message with the session state variables
|
|
5241
|
+
if self.resolve_in_context:
|
|
5242
|
+
sys_message_content = self._format_message_with_state_variables(
|
|
5243
|
+
sys_message_content,
|
|
5244
|
+
user_id=user_id,
|
|
5245
|
+
session_state=session_state,
|
|
5246
|
+
dependencies=dependencies,
|
|
5247
|
+
metadata=metadata,
|
|
5248
|
+
)
|
|
5249
|
+
|
|
5250
|
+
# type: ignore
|
|
5251
|
+
return Message(role=self.system_message_role, content=sys_message_content)
|
|
5252
|
+
|
|
5253
|
+
# 1. Build and return the default system message for the Team.
|
|
5254
|
+
# 1.1 Build the list of instructions for the system message
|
|
5255
|
+
self.model = cast(Model, self.model)
|
|
5256
|
+
instructions: List[str] = []
|
|
5257
|
+
if self.instructions is not None:
|
|
5258
|
+
_instructions = self.instructions
|
|
5259
|
+
if callable(self.instructions):
|
|
5260
|
+
import inspect
|
|
5261
|
+
|
|
5262
|
+
signature = inspect.signature(self.instructions)
|
|
5263
|
+
if "team" in signature.parameters:
|
|
5264
|
+
_instructions = self.instructions(team=self)
|
|
5265
|
+
elif "agent" in signature.parameters:
|
|
5266
|
+
_instructions = self.instructions(agent=self)
|
|
5267
|
+
else:
|
|
5268
|
+
_instructions = self.instructions()
|
|
5269
|
+
|
|
5270
|
+
if isinstance(_instructions, str):
|
|
5271
|
+
instructions.append(_instructions)
|
|
5272
|
+
elif isinstance(_instructions, list):
|
|
5273
|
+
instructions.extend(_instructions)
|
|
5274
|
+
|
|
5275
|
+
# 1.2 Add instructions from the Model
|
|
5276
|
+
_model_instructions = self.model.get_instructions_for_model(self._tools_for_model)
|
|
5277
|
+
if _model_instructions is not None:
|
|
5278
|
+
instructions.extend(_model_instructions)
|
|
5279
|
+
|
|
5280
|
+
# 1.3 Build a list of additional information for the system message
|
|
5281
|
+
additional_information: List[str] = []
|
|
5282
|
+
# 1.3.1 Add instructions for using markdown
|
|
5283
|
+
if self.markdown and self.output_schema is None:
|
|
5284
|
+
additional_information.append("Use markdown to format your answers.")
|
|
5285
|
+
# 1.3.2 Add the current datetime
|
|
5286
|
+
if self.add_datetime_to_context:
|
|
5287
|
+
from datetime import datetime
|
|
5288
|
+
|
|
5289
|
+
tz = None
|
|
5290
|
+
|
|
5291
|
+
if self.timezone_identifier:
|
|
5292
|
+
try:
|
|
5293
|
+
from zoneinfo import ZoneInfo
|
|
5294
|
+
|
|
5295
|
+
tz = ZoneInfo(self.timezone_identifier)
|
|
5296
|
+
except Exception:
|
|
5297
|
+
log_warning("Invalid timezone identifier")
|
|
5298
|
+
|
|
5299
|
+
time = datetime.now(tz) if tz else datetime.now()
|
|
5300
|
+
|
|
5301
|
+
additional_information.append(f"The current time is {time}.")
|
|
5302
|
+
|
|
5303
|
+
# 1.3.3 Add the current location
|
|
5304
|
+
if self.add_location_to_context:
|
|
5305
|
+
from agno.utils.location import get_location
|
|
5306
|
+
|
|
5307
|
+
location = get_location()
|
|
5308
|
+
if location:
|
|
5309
|
+
location_str = ", ".join(
|
|
5310
|
+
filter(None, [location.get("city"), location.get("region"), location.get("country")])
|
|
5311
|
+
)
|
|
5312
|
+
if location_str:
|
|
5313
|
+
additional_information.append(f"Your approximate location is: {location_str}.")
|
|
5314
|
+
|
|
5315
|
+
# 1.3.4 Add team name if provided
|
|
5316
|
+
if self.name is not None and self.add_name_to_context:
|
|
5317
|
+
additional_information.append(f"Your name is: {self.name}.")
|
|
5318
|
+
|
|
5319
|
+
if self.knowledge is not None and self.enable_agentic_knowledge_filters:
|
|
5320
|
+
valid_filters = getattr(self.knowledge, "valid_metadata_filters", None)
|
|
5321
|
+
if valid_filters:
|
|
5322
|
+
valid_filters_str = ", ".join(valid_filters)
|
|
5323
|
+
additional_information.append(
|
|
5324
|
+
dedent(f"""
|
|
5325
|
+
The knowledge base contains documents with these metadata filters: {valid_filters_str}.
|
|
5326
|
+
Always use filters when the user query indicates specific metadata.
|
|
5327
|
+
Examples:
|
|
5328
|
+
1. If the user asks about a specific person like "Jordan Mitchell", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like user_id>': '<valid value based on the user query>'}}.
|
|
5329
|
+
2. If the user asks about a specific document type like "contracts", you MUST use the search_knowledge_base tool with the filters parameter set to {{'document_type': 'contract'}}.
|
|
5330
|
+
4. If the user asks about a specific location like "documents from New York", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like location>': 'New York'}}.
|
|
5331
|
+
General Guidelines:
|
|
5332
|
+
- Always analyze the user query to identify relevant metadata.
|
|
5333
|
+
- Use the most specific filter(s) possible to narrow down results.
|
|
5334
|
+
- If multiple filters are relevant, combine them in the filters parameter (e.g., {{'name': 'Jordan Mitchell', 'document_type': 'contract'}}).
|
|
5335
|
+
- Ensure the filter keys match the valid metadata filters: {valid_filters_str}.
|
|
5336
|
+
You can use the search_knowledge_base tool to search the knowledge base and get the most relevant documents. Make sure to pass the filters as [Dict[str: Any]] to the tool. FOLLOW THIS STRUCTURE STRICTLY.
|
|
5337
|
+
""")
|
|
5338
|
+
)
|
|
5339
|
+
|
|
5340
|
+
# 2 Build the default system message for the Agent.
|
|
5341
|
+
system_message_content: str = ""
|
|
5342
|
+
system_message_content += "You are the leader of a team and sub-teams of AI Agents.\n"
|
|
5343
|
+
system_message_content += "Your task is to coordinate the team to complete the user's request.\n"
|
|
5344
|
+
|
|
5345
|
+
system_message_content += "\nHere are the members in your team:\n"
|
|
5346
|
+
system_message_content += "<team_members>\n"
|
|
5347
|
+
system_message_content += self.get_members_system_message_content()
|
|
5348
|
+
if self.get_member_information_tool:
|
|
5349
|
+
system_message_content += "If you need to get information about your team members, you can use the `get_member_information` tool at any time.\n"
|
|
5350
|
+
system_message_content += "</team_members>\n"
|
|
5351
|
+
|
|
5352
|
+
system_message_content += "\n<how_to_respond>\n"
|
|
5353
|
+
|
|
5354
|
+
if self.delegate_task_to_all_members:
|
|
5355
|
+
system_message_content += (
|
|
5356
|
+
"- Your role is to forward tasks to members in your team with the highest likelihood of completing the user's request.\n"
|
|
5357
|
+
"- You can either respond directly or use the `delegate_task_to_members` tool to delegate a task to all members in your team to get a collaborative response.\n"
|
|
5358
|
+
"- To delegate a task to all members in your team, call `delegate_task_to_members` ONLY once. This will delegate a task to all members in your team.\n"
|
|
5359
|
+
"- Analyze the responses from all members and evaluate whether the task has been completed.\n"
|
|
5360
|
+
"- If you feel the task has been completed, you can stop and respond to the user.\n"
|
|
5361
|
+
)
|
|
5362
|
+
else:
|
|
5363
|
+
system_message_content += (
|
|
5364
|
+
"- Your role is to delegate tasks to members in your team with the highest likelihood of completing the user's request.\n"
|
|
5365
|
+
"- Carefully analyze the tools available to the members and their roles before delegating tasks.\n"
|
|
5366
|
+
"- You cannot use a member tool directly. You can only delegate tasks to members.\n"
|
|
5367
|
+
"- When you delegate a task to another member, make sure to include:\n"
|
|
5368
|
+
" - member_id (str): The ID of the member to delegate the task to. Use only the ID of the member, not the ID of the team followed by the ID of the member.\n"
|
|
5369
|
+
" - task_description (str): A clear description of the task.\n"
|
|
5370
|
+
" - expected_output (str): The expected output.\n"
|
|
5371
|
+
"- You can delegate tasks to multiple members at once.\n"
|
|
5372
|
+
"- You must always analyze the responses from members before responding to the user.\n"
|
|
5373
|
+
"- After analyzing the responses from the members, if you feel the task has been completed, you can stop and respond to the user.\n"
|
|
5374
|
+
"- If you are not satisfied with the responses from the members, you should re-assign the task.\n"
|
|
5375
|
+
"- For simple greetings, thanks, or questions about the team itself, you should respond directly.\n"
|
|
5376
|
+
"- For all work requests, tasks, or questions requiring expertise, route to appropriate team members.\n"
|
|
5377
|
+
)
|
|
5378
|
+
system_message_content += "</how_to_respond>\n\n"
|
|
5379
|
+
|
|
5380
|
+
# Attached media
|
|
5381
|
+
if audio is not None or images is not None or videos is not None or files is not None:
|
|
5382
|
+
system_message_content += "<attached_media>\n"
|
|
5383
|
+
system_message_content += "You have the following media attached to your message:\n"
|
|
5384
|
+
if audio is not None and len(audio) > 0:
|
|
5385
|
+
system_message_content += " - Audio\n"
|
|
5386
|
+
if images is not None and len(images) > 0:
|
|
5387
|
+
system_message_content += " - Images\n"
|
|
5388
|
+
if videos is not None and len(videos) > 0:
|
|
5389
|
+
system_message_content += " - Videos\n"
|
|
5390
|
+
if files is not None and len(files) > 0:
|
|
5391
|
+
system_message_content += " - Files\n"
|
|
5392
|
+
system_message_content += "</attached_media>\n\n"
|
|
5393
|
+
|
|
5394
|
+
# Then add memories to the system prompt
|
|
5395
|
+
if self.add_memories_to_context:
|
|
5396
|
+
_memory_manager_not_set = False
|
|
5397
|
+
if not user_id:
|
|
5398
|
+
user_id = "default"
|
|
5399
|
+
if self.memory_manager is None:
|
|
5400
|
+
self._set_memory_manager()
|
|
5401
|
+
_memory_manager_not_set = True
|
|
5402
|
+
|
|
5403
|
+
if self._has_async_db():
|
|
5404
|
+
user_memories = await self.memory_manager.aget_user_memories(user_id=user_id) # type: ignore
|
|
5405
|
+
else:
|
|
5406
|
+
user_memories = self.memory_manager.get_user_memories(user_id=user_id) # type: ignore
|
|
5407
|
+
|
|
4964
5408
|
if user_memories and len(user_memories) > 0:
|
|
4965
5409
|
system_message_content += (
|
|
4966
5410
|
"You have access to memories from previous interactions with the user that you can use:\n\n"
|
|
@@ -5052,8 +5496,8 @@ class Team:
|
|
|
5052
5496
|
f"<additional_context>\n{self.additional_context.strip()}\n</additional_context>\n\n"
|
|
5053
5497
|
)
|
|
5054
5498
|
|
|
5055
|
-
if add_session_state_to_context
|
|
5056
|
-
system_message_content +=
|
|
5499
|
+
if self.add_session_state_to_context:
|
|
5500
|
+
system_message_content += f"<session_state>\n{session_state}\n</session_state>\n\n"
|
|
5057
5501
|
|
|
5058
5502
|
# Add the JSON output prompt if output_schema is provided and structured_outputs is False
|
|
5059
5503
|
if (
|
|
@@ -5204,6 +5648,134 @@ class Team:
|
|
|
5204
5648
|
|
|
5205
5649
|
return run_messages
|
|
5206
5650
|
|
|
5651
|
+
async def _aget_run_messages(
    self,
    *,
    run_response: TeamRunOutput,
    session: TeamSession,
    session_state: Optional[Dict[str, Any]] = None,
    user_id: Optional[str] = None,
    input_message: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    knowledge_filters: Optional[Dict[str, Any]] = None,
    add_history_to_context: Optional[bool] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    metadata: Optional[Dict[str, Any]] = None,
    **kwargs: Any,
) -> RunMessages:
    """Async counterpart of `_get_run_messages`: build the RunMessages object for one run.

    The returned RunMessages carries:
    - system_message: The system message for this run
    - user_message: The user message for this run
    - messages: Full ordered list of messages to send to the model

    Assembly order (order matters — it is the order the model sees them):
    1. System message (built via `aget_system_message`)
    2. Extra messages from `self.additional_input`
    3. History messages from previous runs in this session
    4. The user message built from `input_message`
    """
    # Initialize the RunMessages object
    run_messages = RunMessages()

    # 1. Add system message to run_messages.
    # Only the system message is built asynchronously here (it may hit an async db for memories).
    system_message = await self.aget_system_message(
        session=session,
        session_state=session_state,
        user_id=user_id,
        images=images,
        audio=audio,
        videos=videos,
        files=files,
        dependencies=dependencies,
        metadata=metadata,
        add_session_state_to_context=add_session_state_to_context,
    )
    if system_message is not None:
        run_messages.system_message = system_message
        run_messages.messages.append(system_message)

    # 2. Add extra messages to run_messages if provided.
    # `additional_input` entries may be Message objects or raw dicts; dicts are
    # validated into Message and silently skipped (with a warning) if invalid.
    if self.additional_input is not None:
        messages_to_add_to_run_response: List[Message] = []
        if run_messages.extra_messages is None:
            run_messages.extra_messages = []

        for _m in self.additional_input:
            if isinstance(_m, Message):
                messages_to_add_to_run_response.append(_m)
                run_messages.messages.append(_m)
                run_messages.extra_messages.append(_m)
            elif isinstance(_m, dict):
                try:
                    _m_parsed = Message.model_validate(_m)
                    messages_to_add_to_run_response.append(_m_parsed)
                    run_messages.messages.append(_m_parsed)
                    run_messages.extra_messages.append(_m_parsed)
                except Exception as e:
                    log_warning(f"Failed to validate message: {e}")
        # Add the extra messages to the run_response so they are recorded with the run
        if len(messages_to_add_to_run_response) > 0:
            log_debug(f"Adding {len(messages_to_add_to_run_response)} extra messages")
            if run_response.additional_input is None:
                run_response.additional_input = messages_to_add_to_run_response
            else:
                run_response.additional_input.extend(messages_to_add_to_run_response)

    # 3. Add history to run_messages
    if add_history_to_context:
        from copy import deepcopy

        # System-role messages are skipped: the current run supplies its own system message.
        history = session.get_messages_from_last_n_runs(
            last_n=self.num_history_runs,
            skip_role=self.system_message_role,
            team_id=self.id,
        )

        if len(history) > 0:
            # Create a deep copy of the history messages to avoid modifying the original messages
            history_copy = [deepcopy(msg) for msg in history]

            # Tag each message as coming from history so downstream consumers can tell
            # replayed context apart from messages produced in this run.
            for _msg in history_copy:
                _msg.from_history = True

            log_debug(f"Adding {len(history_copy)} messages from history")

            # Extend the messages with the history
            run_messages.messages += history_copy

    # 4. Add user message to run_messages (appended after history so it is the latest turn)
    user_message: Optional[Message] = None
    # 4.1 Build user message from input_message (None, str, list, dict, Message or BaseModel)
    user_message = self._get_user_message(
        run_response=run_response,
        session_state=session_state,
        input_message=input_message,
        user_id=user_id,
        audio=audio,
        images=images,
        videos=videos,
        files=files,
        knowledge_filters=knowledge_filters,
        dependencies=dependencies,
        add_dependencies_to_context=add_dependencies_to_context,
        metadata=metadata,
        **kwargs,
    )
    # Add user message to run_messages
    if user_message is not None:
        run_messages.user_message = user_message
        run_messages.messages.append(user_message)

    return run_messages
|
|
5778
|
+
|
|
5207
5779
|
def _get_user_message(
|
|
5208
5780
|
self,
|
|
5209
5781
|
*,
|
|
@@ -5703,6 +6275,7 @@ class Team:
|
|
|
5703
6275
|
if self.db is None:
|
|
5704
6276
|
return "Previous session messages not available"
|
|
5705
6277
|
|
|
6278
|
+
self.db = cast(BaseDb, self.db)
|
|
5706
6279
|
selected_sessions = self.db.get_sessions(
|
|
5707
6280
|
session_type=SessionType.TEAM,
|
|
5708
6281
|
limit=num_history_sessions,
|
|
@@ -5742,7 +6315,62 @@ class Team:
|
|
|
5742
6315
|
|
|
5743
6316
|
return json.dumps([msg.to_dict() for msg in all_messages]) if all_messages else "No history found"
|
|
5744
6317
|
|
|
5745
|
-
|
|
6318
|
+
async def aget_previous_session_messages() -> str:
    """Use this function to retrieve messages from previous chat sessions.
    USE THIS TOOL ONLY WHEN THE QUESTION IS EITHER "What was my last conversation?" or "What was my last question?" and similar to it.

    Returns:
        str: JSON formatted list of message pairs from previous sessions
    """
    import json

    # No database configured -> history is simply unavailable.
    if self.db is None:
        return "Previous session messages not available"

    # Async variant of this tool: the db is expected to implement the async interface.
    self.db = cast(AsyncBaseDb, self.db)
    # `num_history_sessions` and `user_id` are captured from the enclosing factory method.
    selected_sessions = await self.db.get_sessions(
        session_type=SessionType.TEAM,
        limit=num_history_sessions,
        user_id=user_id,
        sort_by="created_at",
        sort_order="desc",
    )

    all_messages = []
    # Deduplicate identical (user, assistant) exchanges across runs/sessions.
    seen_message_pairs = set()

    for session in selected_sessions:
        if isinstance(session, TeamSession) and session.runs:
            message_count = 0
            for run in session.runs:
                messages = run.messages
                if messages is not None:
                    # Walk messages in (user, assistant) pairs — assumes strict alternation;
                    # NOTE(review): tool/system interleavings would shift pairing — confirm upstream.
                    for i in range(0, len(messages) - 1, 2):
                        if i + 1 < len(messages):
                            try:
                                user_msg = messages[i]
                                assistant_msg = messages[i + 1]
                                user_content = user_msg.content
                                assistant_content = assistant_msg.content
                                if user_content is None or assistant_content is None:
                                    continue  # Skip this pair if either message has no content

                                # Content-based key: identical exchanges are emitted only once.
                                msg_pair_id = f"{user_content}:{assistant_content}"
                                if msg_pair_id not in seen_message_pairs:
                                    seen_message_pairs.add(msg_pair_id)
                                    all_messages.append(Message.model_validate(user_msg))
                                    all_messages.append(Message.model_validate(assistant_msg))
                                    message_count += 1
                            except Exception as e:
                                # A malformed pair should not abort the whole history scan.
                                log_warning(f"Error processing message pair: {e}")
                                continue

    return json.dumps([msg.to_dict() for msg in all_messages]) if all_messages else "No history found"

# Return the variant matching the configured db: async db -> async tool, else sync tool.
if self._has_async_db():
    return aget_previous_session_messages
else:
    return get_previous_session_messages
|
|
5746
6374
|
|
|
5747
6375
|
def _get_history_for_member_agent(self, session: TeamSession, member_agent: Union[Agent, "Team"]) -> List[Message]:
|
|
5748
6376
|
from copy import deepcopy
|
|
@@ -5834,7 +6462,6 @@ class Team:
|
|
|
5834
6462
|
files: Optional[List[File]] = None,
|
|
5835
6463
|
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
5836
6464
|
add_history_to_context: Optional[bool] = None,
|
|
5837
|
-
workflow_context: Optional[Dict] = None,
|
|
5838
6465
|
dependencies: Optional[Dict[str, Any]] = None,
|
|
5839
6466
|
add_dependencies_to_context: Optional[bool] = None,
|
|
5840
6467
|
add_session_state_to_context: Optional[bool] = None,
|
|
@@ -5989,7 +6616,6 @@ class Team:
|
|
|
5989
6616
|
stream=True,
|
|
5990
6617
|
stream_intermediate_steps=stream_intermediate_steps,
|
|
5991
6618
|
debug_mode=debug_mode,
|
|
5992
|
-
workflow_context=workflow_context,
|
|
5993
6619
|
dependencies=dependencies,
|
|
5994
6620
|
add_dependencies_to_context=add_dependencies_to_context,
|
|
5995
6621
|
metadata=metadata,
|
|
@@ -6029,7 +6655,6 @@ class Team:
|
|
|
6029
6655
|
files=files,
|
|
6030
6656
|
stream=False,
|
|
6031
6657
|
debug_mode=debug_mode,
|
|
6032
|
-
workflow_context=workflow_context,
|
|
6033
6658
|
dependencies=dependencies,
|
|
6034
6659
|
add_dependencies_to_context=add_dependencies_to_context,
|
|
6035
6660
|
add_session_state_to_context=add_session_state_to_context,
|
|
@@ -6121,7 +6746,6 @@ class Team:
|
|
|
6121
6746
|
add_dependencies_to_context=add_dependencies_to_context,
|
|
6122
6747
|
add_session_state_to_context=add_session_state_to_context,
|
|
6123
6748
|
metadata=metadata,
|
|
6124
|
-
workflow_context=workflow_context,
|
|
6125
6749
|
knowledge_filters=knowledge_filters
|
|
6126
6750
|
if not member_agent.knowledge_filters and member_agent.knowledge
|
|
6127
6751
|
else None,
|
|
@@ -6157,7 +6781,6 @@ class Team:
|
|
|
6157
6781
|
files=files,
|
|
6158
6782
|
stream=False,
|
|
6159
6783
|
debug_mode=debug_mode,
|
|
6160
|
-
workflow_context=workflow_context,
|
|
6161
6784
|
dependencies=dependencies,
|
|
6162
6785
|
add_dependencies_to_context=add_dependencies_to_context,
|
|
6163
6786
|
add_session_state_to_context=add_session_state_to_context,
|
|
@@ -6234,7 +6857,6 @@ class Team:
|
|
|
6234
6857
|
files=files,
|
|
6235
6858
|
stream=True,
|
|
6236
6859
|
stream_intermediate_steps=stream_intermediate_steps,
|
|
6237
|
-
workflow_context=workflow_context,
|
|
6238
6860
|
knowledge_filters=knowledge_filters
|
|
6239
6861
|
if not member_agent.knowledge_filters and member_agent.knowledge
|
|
6240
6862
|
else None,
|
|
@@ -6275,7 +6897,6 @@ class Team:
|
|
|
6275
6897
|
audio=audio,
|
|
6276
6898
|
files=files,
|
|
6277
6899
|
stream=False,
|
|
6278
|
-
workflow_context=workflow_context,
|
|
6279
6900
|
knowledge_filters=knowledge_filters
|
|
6280
6901
|
if not member_agent.knowledge_filters and member_agent.knowledge
|
|
6281
6902
|
else None,
|
|
@@ -6352,7 +6973,6 @@ class Team:
|
|
|
6352
6973
|
files=files,
|
|
6353
6974
|
stream=True,
|
|
6354
6975
|
stream_intermediate_steps=stream_intermediate_steps,
|
|
6355
|
-
workflow_context=workflow_context,
|
|
6356
6976
|
debug_mode=debug_mode,
|
|
6357
6977
|
knowledge_filters=knowledge_filters
|
|
6358
6978
|
if not member_agent.knowledge_filters and member_agent.knowledge
|
|
@@ -6433,7 +7053,6 @@ class Team:
|
|
|
6433
7053
|
stream=False,
|
|
6434
7054
|
stream_intermediate_steps=stream_intermediate_steps,
|
|
6435
7055
|
debug_mode=debug_mode,
|
|
6436
|
-
workflow_context=workflow_context,
|
|
6437
7056
|
knowledge_filters=knowledge_filters
|
|
6438
7057
|
if not member_agent.knowledge_filters and member_agent.knowledge
|
|
6439
7058
|
else None,
|
|
@@ -6517,6 +7136,18 @@ class Team:
|
|
|
6517
7136
|
log_warning(f"Error getting session from db: {e}")
|
|
6518
7137
|
return None
|
|
6519
7138
|
|
|
7139
|
+
async def _aread_session(self, session_id: str) -> Optional[TeamSession]:
    """Fetch the TeamSession with the given id from the database.

    Returns None when no db is configured or the read fails; failures are
    logged rather than raised so callers can fall back to creating a session.
    """
    try:
        # A db handle is required; its absence is routed through the same failure path.
        if not self.db:
            raise ValueError("Db not initialized")
        self.db = cast(AsyncBaseDb, self.db)
        return await self.db.get_session(session_id=session_id, session_type=SessionType.TEAM)  # type: ignore
    except Exception as e:
        # Best-effort read: log and signal "not found" to the caller.
        log_warning(f"Error getting session from db: {e}")
        return None
|
|
7150
|
+
|
|
6520
7151
|
def _upsert_session(self, session: TeamSession) -> Optional[TeamSession]:
|
|
6521
7152
|
"""Upsert a Session into the database."""
|
|
6522
7153
|
|
|
@@ -6528,6 +7159,17 @@ class Team:
|
|
|
6528
7159
|
log_warning(f"Error upserting session into db: {e}")
|
|
6529
7160
|
return None
|
|
6530
7161
|
|
|
7162
|
+
async def _aupsert_session(self, session: TeamSession) -> Optional[TeamSession]:
|
|
7163
|
+
"""Upsert a Session into the database."""
|
|
7164
|
+
|
|
7165
|
+
try:
|
|
7166
|
+
if not self.db:
|
|
7167
|
+
raise ValueError("Db not initialized")
|
|
7168
|
+
return await self.db.upsert_session(session=session) # type: ignore
|
|
7169
|
+
except Exception as e:
|
|
7170
|
+
log_warning(f"Error upserting session into db: {e}")
|
|
7171
|
+
return None
|
|
7172
|
+
|
|
6531
7173
|
def get_run_output(
|
|
6532
7174
|
self, run_id: str, session_id: Optional[str] = None
|
|
6533
7175
|
) -> Optional[Union[TeamRunOutput, RunOutput]]:
|
|
@@ -6617,6 +7259,47 @@ class Team:
|
|
|
6617
7259
|
|
|
6618
7260
|
return team_session
|
|
6619
7261
|
|
|
7262
|
+
async def _aread_or_create_session(self, session_id: str, user_id: Optional[str] = None) -> TeamSession:
    """Load the TeamSession from storage, creating a new one when none is found.

    Resolution order: in-memory cached session -> database (async or sync
    reader, depending on the configured db) -> freshly created session.

    Returns:
        TeamSession: The cached, loaded, or newly created session (never None).
    """
    from time import time

    from agno.session.team import TeamSession

    # Return existing session if we have one (cache hit on the same session_id)
    if self._team_session is not None and self._team_session.session_id == session_id:
        return self._team_session

    # Try to load from database. Teams that belong to a parent team or a workflow
    # do not read their own session records.
    team_session = None
    if self.db is not None and self.parent_team_id is None and self.workflow_id is None:
        if self._has_async_db():
            team_session = cast(TeamSession, await self._aread_session(session_id=session_id))
        else:
            team_session = cast(TeamSession, self._read_session(session_id=session_id))

    # Create new session if none found
    if team_session is None:
        log_debug(f"Creating new TeamSession: {session_id}")
        team_session = TeamSession(
            session_id=session_id,
            team_id=self.id,
            user_id=user_id,
            team_data=self._get_team_data(),
            session_data={},
            metadata=self.metadata,
            created_at=int(time()),
        )

    # Cache the session if relevant (cache_session enables in-memory reuse across runs)
    if team_session is not None and self.cache_session:
        self._team_session = team_session

    return team_session
|
|
7302
|
+
|
|
6620
7303
|
def get_session(
|
|
6621
7304
|
self,
|
|
6622
7305
|
session_id: Optional[str] = None,
|
|
@@ -6668,6 +7351,16 @@ class Team:
|
|
|
6668
7351
|
self._upsert_session(session=session)
|
|
6669
7352
|
log_debug(f"Created or updated TeamSession record: {session.session_id}")
|
|
6670
7353
|
|
|
7354
|
+
async def asave_session(self, session: TeamSession) -> None:
    """Persist the TeamSession, stripping run-scoped state keys first."""
    # Only top-level teams persist sessions: members of a parent team or a
    # workflow do not own the session record.
    if self.db is None or self.parent_team_id is not None or self.workflow_id is not None:
        return
    if session.session_data is not None and "session_state" in session.session_data:
        # These keys are per-run context and must not be written to storage.
        for transient_key in ("current_session_id", "current_user_id", "current_run_id"):
            session.session_data["session_state"].pop(transient_key, None)  # type: ignore
    await self._aupsert_session(session=session)
    log_debug(f"Created or updated TeamSession record: {session.session_id}")
|
|
7363
|
+
|
|
6671
7364
|
def _load_session_state(self, session: TeamSession, session_state: Dict[str, Any]) -> Dict[str, Any]:
|
|
6672
7365
|
"""Load and return the stored session_state from the database, optionally merging it with the given one"""
|
|
6673
7366
|
|