agno 2.1.4__py3-none-any.whl → 2.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +1775 -538
- agno/db/async_postgres/__init__.py +3 -0
- agno/db/async_postgres/async_postgres.py +1668 -0
- agno/db/async_postgres/schemas.py +124 -0
- agno/db/async_postgres/utils.py +289 -0
- agno/db/base.py +237 -2
- agno/db/dynamo/dynamo.py +2 -2
- agno/db/firestore/firestore.py +2 -2
- agno/db/firestore/utils.py +4 -2
- agno/db/gcs_json/gcs_json_db.py +2 -2
- agno/db/in_memory/in_memory_db.py +2 -2
- agno/db/json/json_db.py +2 -2
- agno/db/migrations/v1_to_v2.py +43 -13
- agno/db/mongo/mongo.py +14 -6
- agno/db/mongo/utils.py +0 -4
- agno/db/mysql/mysql.py +23 -13
- agno/db/postgres/postgres.py +17 -6
- agno/db/redis/redis.py +2 -2
- agno/db/singlestore/singlestore.py +19 -10
- agno/db/sqlite/sqlite.py +22 -12
- agno/db/sqlite/utils.py +8 -3
- agno/db/surrealdb/__init__.py +3 -0
- agno/db/surrealdb/metrics.py +292 -0
- agno/db/surrealdb/models.py +259 -0
- agno/db/surrealdb/queries.py +71 -0
- agno/db/surrealdb/surrealdb.py +1193 -0
- agno/db/surrealdb/utils.py +87 -0
- agno/eval/accuracy.py +50 -43
- agno/eval/performance.py +6 -3
- agno/eval/reliability.py +6 -3
- agno/eval/utils.py +33 -16
- agno/exceptions.py +8 -2
- agno/knowledge/knowledge.py +260 -46
- agno/knowledge/reader/pdf_reader.py +4 -6
- agno/knowledge/reader/reader_factory.py +2 -3
- agno/memory/manager.py +254 -46
- agno/models/anthropic/claude.py +37 -0
- agno/os/app.py +8 -7
- agno/os/interfaces/a2a/router.py +3 -5
- agno/os/interfaces/agui/router.py +4 -1
- agno/os/interfaces/agui/utils.py +27 -6
- agno/os/interfaces/slack/router.py +2 -4
- agno/os/mcp.py +98 -41
- agno/os/router.py +23 -0
- agno/os/routers/evals/evals.py +52 -20
- agno/os/routers/evals/utils.py +14 -14
- agno/os/routers/knowledge/knowledge.py +130 -9
- agno/os/routers/knowledge/schemas.py +57 -0
- agno/os/routers/memory/memory.py +116 -44
- agno/os/routers/metrics/metrics.py +16 -6
- agno/os/routers/session/session.py +65 -22
- agno/os/schema.py +36 -0
- agno/os/utils.py +64 -11
- agno/reasoning/anthropic.py +80 -0
- agno/reasoning/gemini.py +73 -0
- agno/reasoning/openai.py +5 -0
- agno/reasoning/vertexai.py +76 -0
- agno/session/workflow.py +3 -3
- agno/team/team.py +968 -179
- agno/tools/googlesheets.py +20 -5
- agno/tools/mcp_toolbox.py +3 -3
- agno/tools/scrapegraph.py +1 -1
- agno/utils/models/claude.py +3 -1
- agno/utils/streamlit.py +1 -1
- agno/vectordb/base.py +22 -1
- agno/vectordb/cassandra/cassandra.py +9 -0
- agno/vectordb/chroma/chromadb.py +26 -6
- agno/vectordb/clickhouse/clickhousedb.py +9 -1
- agno/vectordb/couchbase/couchbase.py +11 -0
- agno/vectordb/lancedb/lance_db.py +20 -0
- agno/vectordb/langchaindb/langchaindb.py +11 -0
- agno/vectordb/lightrag/lightrag.py +9 -0
- agno/vectordb/llamaindex/llamaindexdb.py +15 -1
- agno/vectordb/milvus/milvus.py +23 -0
- agno/vectordb/mongodb/mongodb.py +22 -0
- agno/vectordb/pgvector/pgvector.py +19 -0
- agno/vectordb/pineconedb/pineconedb.py +35 -4
- agno/vectordb/qdrant/qdrant.py +24 -0
- agno/vectordb/singlestore/singlestore.py +25 -17
- agno/vectordb/surrealdb/surrealdb.py +18 -2
- agno/vectordb/upstashdb/upstashdb.py +26 -1
- agno/vectordb/weaviate/weaviate.py +18 -0
- agno/workflow/condition.py +4 -0
- agno/workflow/loop.py +4 -0
- agno/workflow/parallel.py +4 -0
- agno/workflow/router.py +4 -0
- agno/workflow/step.py +30 -14
- agno/workflow/steps.py +4 -0
- agno/workflow/types.py +2 -2
- agno/workflow/workflow.py +328 -61
- {agno-2.1.4.dist-info → agno-2.1.6.dist-info}/METADATA +100 -41
- {agno-2.1.4.dist-info → agno-2.1.6.dist-info}/RECORD +95 -82
- {agno-2.1.4.dist-info → agno-2.1.6.dist-info}/WHEEL +0 -0
- {agno-2.1.4.dist-info → agno-2.1.6.dist-info}/licenses/LICENSE +0 -0
- {agno-2.1.4.dist-info → agno-2.1.6.dist-info}/top_level.txt +0 -0
agno/agent/agent.py
CHANGED
|
@@ -27,7 +27,7 @@ from uuid import uuid4
|
|
|
27
27
|
|
|
28
28
|
from pydantic import BaseModel
|
|
29
29
|
|
|
30
|
-
from agno.db.base import BaseDb, SessionType, UserMemory
|
|
30
|
+
from agno.db.base import AsyncBaseDb, BaseDb, SessionType, UserMemory
|
|
31
31
|
from agno.exceptions import (
|
|
32
32
|
InputCheckError,
|
|
33
33
|
ModelProviderError,
|
|
@@ -177,7 +177,7 @@ class Agent:
|
|
|
177
177
|
|
|
178
178
|
# --- Database ---
|
|
179
179
|
# Database to use for this agent
|
|
180
|
-
db: Optional[BaseDb] = None
|
|
180
|
+
db: Optional[Union[BaseDb, AsyncBaseDb]] = None
|
|
181
181
|
|
|
182
182
|
# --- Agent History ---
|
|
183
183
|
# add_history_to_context=true adds messages from the chat history to the messages list sent to the Model.
|
|
@@ -247,6 +247,10 @@ class Agent:
|
|
|
247
247
|
send_media_to_model: bool = True
|
|
248
248
|
# If True, store media in run output
|
|
249
249
|
store_media: bool = True
|
|
250
|
+
# If True, store tool results in run output
|
|
251
|
+
store_tool_messages: bool = True
|
|
252
|
+
# If True, store history messages in run output
|
|
253
|
+
store_history_messages: bool = True
|
|
250
254
|
|
|
251
255
|
# --- System message settings ---
|
|
252
256
|
# Provide the system message as a string or function
|
|
@@ -373,7 +377,7 @@ class Agent:
|
|
|
373
377
|
num_history_sessions: Optional[int] = None,
|
|
374
378
|
dependencies: Optional[Dict[str, Any]] = None,
|
|
375
379
|
add_dependencies_to_context: bool = False,
|
|
376
|
-
db: Optional[BaseDb] = None,
|
|
380
|
+
db: Optional[Union[BaseDb, AsyncBaseDb]] = None,
|
|
377
381
|
memory_manager: Optional[MemoryManager] = None,
|
|
378
382
|
enable_agentic_memory: bool = False,
|
|
379
383
|
enable_user_memories: bool = False,
|
|
@@ -384,6 +388,8 @@ class Agent:
|
|
|
384
388
|
add_history_to_context: bool = False,
|
|
385
389
|
num_history_runs: int = 3,
|
|
386
390
|
store_media: bool = True,
|
|
391
|
+
store_tool_messages: bool = True,
|
|
392
|
+
store_history_messages: bool = True,
|
|
387
393
|
knowledge: Optional[Knowledge] = None,
|
|
388
394
|
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
389
395
|
enable_agentic_knowledge_filters: Optional[bool] = None,
|
|
@@ -484,6 +490,8 @@ class Agent:
|
|
|
484
490
|
)
|
|
485
491
|
|
|
486
492
|
self.store_media = store_media
|
|
493
|
+
self.store_tool_messages = store_tool_messages
|
|
494
|
+
self.store_history_messages = store_history_messages
|
|
487
495
|
|
|
488
496
|
self.knowledge = knowledge
|
|
489
497
|
self.knowledge_filters = knowledge_filters
|
|
@@ -691,6 +699,10 @@ class Agent:
|
|
|
691
699
|
self.enable_session_summaries or self.session_summary_manager is not None
|
|
692
700
|
)
|
|
693
701
|
|
|
702
|
+
def _has_async_db(self) -> bool:
|
|
703
|
+
"""Return True if the db the agent is equipped with is an Async implementation"""
|
|
704
|
+
return self.db is not None and isinstance(self.db, AsyncBaseDb)
|
|
705
|
+
|
|
694
706
|
def initialize_agent(self, debug_mode: Optional[bool] = None) -> None:
|
|
695
707
|
self._set_default_model()
|
|
696
708
|
self._set_debug(debug_mode=debug_mode)
|
|
@@ -883,8 +895,6 @@ class Agent:
|
|
|
883
895
|
|
|
884
896
|
if self.store_media:
|
|
885
897
|
self._store_media(run_response, model_response)
|
|
886
|
-
else:
|
|
887
|
-
self._scrub_media_from_run_output(run_response)
|
|
888
898
|
|
|
889
899
|
# We should break out of the run function
|
|
890
900
|
if any(tool_call.is_paused for tool_call in run_response.tools or []):
|
|
@@ -930,7 +940,11 @@ class Agent:
|
|
|
930
940
|
# Consume the response iterator to ensure the memory is updated before the run is completed
|
|
931
941
|
deque(response_iterator, maxlen=0)
|
|
932
942
|
|
|
933
|
-
# 11.
|
|
943
|
+
# 11. Scrub the stored run based on storage flags
|
|
944
|
+
if self._scrub_run_output_for_storage(run_response):
|
|
945
|
+
session.upsert_run(run=run_response)
|
|
946
|
+
|
|
947
|
+
# 12. Save session to memory
|
|
934
948
|
self.save_session(session=session)
|
|
935
949
|
|
|
936
950
|
# Log Agent Telemetry
|
|
@@ -1133,7 +1147,11 @@ class Agent:
|
|
|
1133
1147
|
create_run_completed_event(from_run_response=run_response), run_response
|
|
1134
1148
|
)
|
|
1135
1149
|
|
|
1136
|
-
# 10.
|
|
1150
|
+
# 10. Scrub the stored run based on storage flags
|
|
1151
|
+
if self._scrub_run_output_for_storage(run_response):
|
|
1152
|
+
session.upsert_run(run=run_response)
|
|
1153
|
+
|
|
1154
|
+
# 11. Save session to storage
|
|
1137
1155
|
self.save_session(session=session)
|
|
1138
1156
|
|
|
1139
1157
|
if stream_intermediate_steps:
|
|
@@ -1242,6 +1260,10 @@ class Agent:
|
|
|
1242
1260
|
**kwargs: Any,
|
|
1243
1261
|
) -> Union[RunOutput, Iterator[Union[RunOutputEvent, RunOutput]]]:
|
|
1244
1262
|
"""Run the Agent and return the response."""
|
|
1263
|
+
if self._has_async_db():
|
|
1264
|
+
raise RuntimeError(
|
|
1265
|
+
"`run` method is not supported with an async database. Please use `arun` method instead."
|
|
1266
|
+
)
|
|
1245
1267
|
|
|
1246
1268
|
# Create a run_id for this specific run
|
|
1247
1269
|
run_id = str(uuid4())
|
|
@@ -1452,7 +1474,7 @@ class Agent:
|
|
|
1452
1474
|
async def _arun(
|
|
1453
1475
|
self,
|
|
1454
1476
|
run_response: RunOutput,
|
|
1455
|
-
|
|
1477
|
+
session_id: str,
|
|
1456
1478
|
session_state: Optional[Dict[str, Any]] = None,
|
|
1457
1479
|
user_id: Optional[str] = None,
|
|
1458
1480
|
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
@@ -1468,27 +1490,42 @@ class Agent:
|
|
|
1468
1490
|
"""Run the Agent and yield the RunOutput.
|
|
1469
1491
|
|
|
1470
1492
|
Steps:
|
|
1471
|
-
1.
|
|
1472
|
-
2.
|
|
1473
|
-
3.
|
|
1474
|
-
4.
|
|
1475
|
-
5.
|
|
1476
|
-
6.
|
|
1477
|
-
7.
|
|
1478
|
-
8.
|
|
1479
|
-
9.
|
|
1480
|
-
10.
|
|
1481
|
-
11.
|
|
1482
|
-
12.
|
|
1493
|
+
1. Read or create session
|
|
1494
|
+
2. Update metadata and session state
|
|
1495
|
+
3. Resolve dependencies
|
|
1496
|
+
4. Execute pre-hooks
|
|
1497
|
+
5. Determine tools for model
|
|
1498
|
+
6. Prepare run messages
|
|
1499
|
+
7. Reason about the task if reasoning is enabled
|
|
1500
|
+
8. Generate a response from the Model (includes running function calls)
|
|
1501
|
+
9. Update the RunOutput with the model response
|
|
1502
|
+
10. Execute post-hooks
|
|
1503
|
+
11. Add RunOutput to Agent Session
|
|
1504
|
+
12. Update Agent Memory
|
|
1505
|
+
13. Scrub the stored run if needed
|
|
1506
|
+
14. Save session to storage
|
|
1483
1507
|
"""
|
|
1508
|
+
log_debug(f"Agent Run Start: {run_response.run_id}", center=True)
|
|
1509
|
+
|
|
1484
1510
|
# Register run for cancellation tracking
|
|
1485
1511
|
register_run(run_response.run_id) # type: ignore
|
|
1486
1512
|
|
|
1487
|
-
# 1.
|
|
1513
|
+
# 1. Read or create session. Reads from the database if provided.
|
|
1514
|
+
if self._has_async_db():
|
|
1515
|
+
agent_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)
|
|
1516
|
+
else:
|
|
1517
|
+
agent_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
|
|
1518
|
+
|
|
1519
|
+
# 2. Update metadata and session state
|
|
1520
|
+
self._update_metadata(session=agent_session)
|
|
1521
|
+
if session_state is not None:
|
|
1522
|
+
session_state = self._load_session_state(session=agent_session, session_state=session_state)
|
|
1523
|
+
|
|
1524
|
+
# 3. Resolve dependencies
|
|
1488
1525
|
if dependencies is not None:
|
|
1489
|
-
await self._aresolve_run_dependencies(dependencies)
|
|
1526
|
+
await self._aresolve_run_dependencies(dependencies=dependencies)
|
|
1490
1527
|
|
|
1491
|
-
#
|
|
1528
|
+
# 4. Execute pre-hooks
|
|
1492
1529
|
run_input = cast(RunInput, run_response.input)
|
|
1493
1530
|
self.model = cast(Model, self.model)
|
|
1494
1531
|
if self.pre_hooks is not None:
|
|
@@ -1497,7 +1534,7 @@ class Agent:
|
|
|
1497
1534
|
hooks=self.pre_hooks, # type: ignore
|
|
1498
1535
|
run_response=run_response,
|
|
1499
1536
|
run_input=run_input,
|
|
1500
|
-
session=
|
|
1537
|
+
session=agent_session,
|
|
1501
1538
|
user_id=user_id,
|
|
1502
1539
|
debug_mode=debug_mode,
|
|
1503
1540
|
**kwargs,
|
|
@@ -1506,10 +1543,12 @@ class Agent:
|
|
|
1506
1543
|
async for _ in pre_hook_iterator:
|
|
1507
1544
|
pass
|
|
1508
1545
|
|
|
1509
|
-
|
|
1546
|
+
# 5. Determine tools for model
|
|
1547
|
+
self.model = cast(Model, self.model)
|
|
1548
|
+
await self._adetermine_tools_for_model(
|
|
1510
1549
|
model=self.model,
|
|
1511
1550
|
run_response=run_response,
|
|
1512
|
-
session=
|
|
1551
|
+
session=agent_session,
|
|
1513
1552
|
session_state=session_state,
|
|
1514
1553
|
dependencies=dependencies,
|
|
1515
1554
|
user_id=user_id,
|
|
@@ -1517,11 +1556,11 @@ class Agent:
|
|
|
1517
1556
|
knowledge_filters=knowledge_filters,
|
|
1518
1557
|
)
|
|
1519
1558
|
|
|
1520
|
-
#
|
|
1521
|
-
run_messages: RunMessages = self.
|
|
1559
|
+
# 6. Prepare run messages
|
|
1560
|
+
run_messages: RunMessages = await self._aget_run_messages(
|
|
1522
1561
|
run_response=run_response,
|
|
1523
1562
|
input=run_input.input_content,
|
|
1524
|
-
session=
|
|
1563
|
+
session=agent_session,
|
|
1525
1564
|
session_state=session_state,
|
|
1526
1565
|
user_id=user_id,
|
|
1527
1566
|
audio=run_input.audios,
|
|
@@ -1539,110 +1578,133 @@ class Agent:
|
|
|
1539
1578
|
if len(run_messages.messages) == 0:
|
|
1540
1579
|
log_error("No messages to be sent to the model.")
|
|
1541
1580
|
|
|
1542
|
-
|
|
1543
|
-
|
|
1544
|
-
|
|
1545
|
-
|
|
1546
|
-
|
|
1547
|
-
# Check for cancellation before model call
|
|
1548
|
-
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
1581
|
+
try:
|
|
1582
|
+
# 7. Reason about the task if reasoning is enabled
|
|
1583
|
+
await self._ahandle_reasoning(run_response=run_response, run_messages=run_messages)
|
|
1584
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
1549
1585
|
|
|
1550
|
-
|
|
1551
|
-
|
|
1552
|
-
|
|
1553
|
-
|
|
1554
|
-
|
|
1555
|
-
|
|
1556
|
-
|
|
1557
|
-
|
|
1558
|
-
|
|
1559
|
-
|
|
1586
|
+
# 8. Generate a response from the Model (includes running function calls)
|
|
1587
|
+
model_response: ModelResponse = await self.model.aresponse(
|
|
1588
|
+
messages=run_messages.messages,
|
|
1589
|
+
tools=self._tools_for_model,
|
|
1590
|
+
functions=self._functions_for_model,
|
|
1591
|
+
tool_choice=self.tool_choice,
|
|
1592
|
+
tool_call_limit=self.tool_call_limit,
|
|
1593
|
+
response_format=response_format,
|
|
1594
|
+
send_media_to_model=self.send_media_to_model,
|
|
1595
|
+
)
|
|
1596
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
1560
1597
|
|
|
1561
|
-
|
|
1562
|
-
|
|
1598
|
+
# If an output model is provided, generate output using the output model
|
|
1599
|
+
await self._agenerate_response_with_output_model(model_response=model_response, run_messages=run_messages)
|
|
1600
|
+
# If a parser model is provided, structure the response separately
|
|
1601
|
+
await self._aparse_response_with_parser_model(model_response=model_response, run_messages=run_messages)
|
|
1563
1602
|
|
|
1564
|
-
|
|
1565
|
-
|
|
1603
|
+
# 9. Update the RunOutput with the model response
|
|
1604
|
+
self._update_run_response(
|
|
1605
|
+
model_response=model_response, run_response=run_response, run_messages=run_messages
|
|
1606
|
+
)
|
|
1566
1607
|
|
|
1567
|
-
|
|
1568
|
-
|
|
1608
|
+
# Optional: Store media
|
|
1609
|
+
if self.store_media:
|
|
1610
|
+
self._store_media(run_response, model_response)
|
|
1569
1611
|
|
|
1570
|
-
|
|
1571
|
-
|
|
1612
|
+
# Break out of the run function if a tool call is paused
|
|
1613
|
+
if any(tool_call.is_paused for tool_call in run_response.tools or []):
|
|
1614
|
+
return self._handle_agent_run_paused(
|
|
1615
|
+
run_response=run_response, run_messages=run_messages, session=agent_session, user_id=user_id
|
|
1616
|
+
)
|
|
1617
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
1572
1618
|
|
|
1573
|
-
|
|
1574
|
-
self.
|
|
1575
|
-
else:
|
|
1576
|
-
self._scrub_media_from_run_output(run_response)
|
|
1619
|
+
# 10. Calculate session metrics
|
|
1620
|
+
self._update_session_metrics(session=agent_session, run_response=run_response)
|
|
1577
1621
|
|
|
1578
|
-
|
|
1579
|
-
|
|
1580
|
-
return self._handle_agent_run_paused(
|
|
1581
|
-
run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
|
|
1582
|
-
)
|
|
1622
|
+
# Set the run status to completed
|
|
1623
|
+
run_response.status = RunStatus.completed
|
|
1583
1624
|
|
|
1584
|
-
|
|
1625
|
+
# Convert the response to the structured format if needed
|
|
1626
|
+
self._convert_response_to_structured_format(run_response)
|
|
1585
1627
|
|
|
1586
|
-
|
|
1587
|
-
|
|
1628
|
+
# Set the run duration
|
|
1629
|
+
if run_response.metrics:
|
|
1630
|
+
run_response.metrics.stop_timer()
|
|
1588
1631
|
|
|
1589
|
-
|
|
1590
|
-
|
|
1591
|
-
|
|
1632
|
+
# 10. Execute post-hooks (after output is generated but before response is returned)
|
|
1633
|
+
if self.post_hooks is not None:
|
|
1634
|
+
await self._aexecute_post_hooks(
|
|
1635
|
+
hooks=self.post_hooks, # type: ignore
|
|
1636
|
+
run_output=run_response,
|
|
1637
|
+
session=agent_session,
|
|
1638
|
+
user_id=user_id,
|
|
1639
|
+
debug_mode=debug_mode,
|
|
1640
|
+
**kwargs,
|
|
1641
|
+
)
|
|
1592
1642
|
|
|
1593
|
-
|
|
1594
|
-
|
|
1595
|
-
|
|
1596
|
-
|
|
1597
|
-
|
|
1598
|
-
session=session,
|
|
1643
|
+
# Optional: Save output to file if save_response_to_file is set
|
|
1644
|
+
self.save_run_response_to_file(
|
|
1645
|
+
run_response=run_response,
|
|
1646
|
+
input=run_messages.user_message,
|
|
1647
|
+
session_id=agent_session.session_id,
|
|
1599
1648
|
user_id=user_id,
|
|
1600
|
-
debug_mode=debug_mode,
|
|
1601
|
-
**kwargs,
|
|
1602
1649
|
)
|
|
1603
1650
|
|
|
1604
|
-
|
|
1605
|
-
|
|
1651
|
+
# 11. Add RunOutput to Agent Session
|
|
1652
|
+
agent_session.upsert_run(run=run_response)
|
|
1606
1653
|
|
|
1607
|
-
|
|
1608
|
-
|
|
1609
|
-
|
|
1610
|
-
|
|
1654
|
+
# 12. Update Agent Memory
|
|
1655
|
+
async for _ in self._amake_memories_and_summaries(
|
|
1656
|
+
run_response=run_response, run_messages=run_messages, session=agent_session, user_id=user_id
|
|
1657
|
+
):
|
|
1658
|
+
pass
|
|
1611
1659
|
|
|
1612
|
-
|
|
1613
|
-
|
|
1660
|
+
# 13. Scrub the stored run based on storage flags
|
|
1661
|
+
if self._scrub_run_output_for_storage(run_response):
|
|
1662
|
+
agent_session.upsert_run(run=run_response)
|
|
1614
1663
|
|
|
1615
|
-
|
|
1616
|
-
|
|
1617
|
-
|
|
1618
|
-
|
|
1619
|
-
|
|
1664
|
+
# 14. Save session to storage
|
|
1665
|
+
if self._has_async_db():
|
|
1666
|
+
await self.asave_session(session=agent_session)
|
|
1667
|
+
else:
|
|
1668
|
+
self.save_session(session=agent_session)
|
|
1620
1669
|
|
|
1621
|
-
|
|
1622
|
-
|
|
1670
|
+
# Log Agent Telemetry
|
|
1671
|
+
await self._alog_agent_telemetry(session_id=agent_session.session_id, run_id=run_response.run_id)
|
|
1623
1672
|
|
|
1624
|
-
|
|
1625
|
-
await self._alog_agent_telemetry(session_id=session.session_id, run_id=run_response.run_id)
|
|
1673
|
+
log_debug(f"Agent Run End: {run_response.run_id}", center=True, symbol="*")
|
|
1626
1674
|
|
|
1627
|
-
|
|
1675
|
+
return run_response
|
|
1628
1676
|
|
|
1629
|
-
|
|
1630
|
-
|
|
1677
|
+
except RunCancelledException as e:
|
|
1678
|
+
# Handle run cancellation
|
|
1679
|
+
log_info(f"Run {run_response.run_id} was cancelled")
|
|
1680
|
+
run_response.content = str(e)
|
|
1681
|
+
run_response.status = RunStatus.cancelled
|
|
1631
1682
|
|
|
1632
|
-
|
|
1683
|
+
# Update the Agent Session before exiting
|
|
1684
|
+
agent_session.upsert_run(run=run_response)
|
|
1685
|
+
if self._has_async_db():
|
|
1686
|
+
await self.asave_session(session=agent_session)
|
|
1687
|
+
else:
|
|
1688
|
+
self.save_session(session=agent_session)
|
|
1689
|
+
|
|
1690
|
+
return run_response
|
|
1691
|
+
|
|
1692
|
+
finally:
|
|
1693
|
+
# Always clean up the run tracking
|
|
1694
|
+
cleanup_run(run_response.run_id) # type: ignore
|
|
1633
1695
|
|
|
1634
1696
|
async def _arun_stream(
|
|
1635
1697
|
self,
|
|
1636
1698
|
run_response: RunOutput,
|
|
1637
|
-
|
|
1699
|
+
session_id: str,
|
|
1638
1700
|
session_state: Optional[Dict[str, Any]] = None,
|
|
1639
1701
|
user_id: Optional[str] = None,
|
|
1640
1702
|
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
1703
|
+
dependencies: Optional[Dict[str, Any]] = None,
|
|
1641
1704
|
add_history_to_context: Optional[bool] = None,
|
|
1642
1705
|
add_dependencies_to_context: Optional[bool] = None,
|
|
1643
1706
|
add_session_state_to_context: Optional[bool] = None,
|
|
1644
1707
|
metadata: Optional[Dict[str, Any]] = None,
|
|
1645
|
-
dependencies: Optional[Dict[str, Any]] = None,
|
|
1646
1708
|
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
|
|
1647
1709
|
stream_intermediate_steps: bool = False,
|
|
1648
1710
|
yield_run_response: Optional[bool] = None,
|
|
@@ -1652,33 +1714,51 @@ class Agent:
|
|
|
1652
1714
|
"""Run the Agent and yield the RunOutput.
|
|
1653
1715
|
|
|
1654
1716
|
Steps:
|
|
1655
|
-
1.
|
|
1656
|
-
2.
|
|
1657
|
-
3.
|
|
1658
|
-
4.
|
|
1659
|
-
5.
|
|
1660
|
-
6.
|
|
1661
|
-
7.
|
|
1662
|
-
8.
|
|
1663
|
-
9.
|
|
1664
|
-
10.
|
|
1717
|
+
1. Read or create session
|
|
1718
|
+
2. Update metadata and session state
|
|
1719
|
+
3. Resolve dependencies
|
|
1720
|
+
4. Execute pre-hooks
|
|
1721
|
+
5. Determine tools for model
|
|
1722
|
+
6. Prepare run messages
|
|
1723
|
+
7. Reason about the task if reasoning is enabled
|
|
1724
|
+
8. Generate a response from the Model (includes running function calls)
|
|
1725
|
+
9. Calculate session metrics
|
|
1726
|
+
10. Add RunOutput to Agent Session
|
|
1727
|
+
11. Update Agent Memory
|
|
1728
|
+
12. Create the run completed event
|
|
1729
|
+
13. Save session to storage
|
|
1665
1730
|
"""
|
|
1731
|
+
log_debug(f"Agent Run Start: {run_response.run_id}", center=True)
|
|
1732
|
+
|
|
1733
|
+
# Start the Run by yielding a RunStarted event
|
|
1734
|
+
if stream_intermediate_steps:
|
|
1735
|
+
yield self._handle_event(create_run_started_event(run_response), run_response)
|
|
1736
|
+
|
|
1737
|
+
# 1. Read or create session. Reads from the database if provided.
|
|
1738
|
+
if self._has_async_db():
|
|
1739
|
+
agent_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)
|
|
1740
|
+
else:
|
|
1741
|
+
agent_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
|
|
1742
|
+
|
|
1743
|
+
# 2. Update metadata and session state
|
|
1744
|
+
self._update_metadata(session=agent_session)
|
|
1745
|
+
if session_state is not None:
|
|
1746
|
+
session_state = self._load_session_state(session=agent_session, session_state=session_state)
|
|
1666
1747
|
|
|
1667
|
-
#
|
|
1748
|
+
# 3. Resolve dependencies
|
|
1668
1749
|
if dependencies is not None:
|
|
1669
1750
|
await self._aresolve_run_dependencies(dependencies=dependencies)
|
|
1670
1751
|
|
|
1671
|
-
#
|
|
1752
|
+
# 4. Execute pre-hooks
|
|
1672
1753
|
run_input = cast(RunInput, run_response.input)
|
|
1673
1754
|
self.model = cast(Model, self.model)
|
|
1674
|
-
|
|
1675
1755
|
if self.pre_hooks is not None:
|
|
1676
1756
|
# Can modify the run input
|
|
1677
1757
|
pre_hook_iterator = self._aexecute_pre_hooks(
|
|
1678
1758
|
hooks=self.pre_hooks, # type: ignore
|
|
1679
1759
|
run_response=run_response,
|
|
1680
1760
|
run_input=run_input,
|
|
1681
|
-
session=
|
|
1761
|
+
session=agent_session,
|
|
1682
1762
|
user_id=user_id,
|
|
1683
1763
|
debug_mode=debug_mode,
|
|
1684
1764
|
**kwargs,
|
|
@@ -1686,22 +1766,24 @@ class Agent:
|
|
|
1686
1766
|
async for event in pre_hook_iterator:
|
|
1687
1767
|
yield event
|
|
1688
1768
|
|
|
1769
|
+
# 5. Determine tools for model
|
|
1770
|
+
self.model = cast(Model, self.model)
|
|
1689
1771
|
self._determine_tools_for_model(
|
|
1690
1772
|
model=self.model,
|
|
1691
1773
|
run_response=run_response,
|
|
1692
|
-
session=
|
|
1774
|
+
session=agent_session,
|
|
1693
1775
|
session_state=session_state,
|
|
1694
|
-
dependencies=dependencies,
|
|
1695
1776
|
user_id=user_id,
|
|
1696
1777
|
async_mode=True,
|
|
1697
1778
|
knowledge_filters=knowledge_filters,
|
|
1779
|
+
dependencies=dependencies,
|
|
1698
1780
|
)
|
|
1699
1781
|
|
|
1700
|
-
#
|
|
1701
|
-
run_messages: RunMessages = self.
|
|
1782
|
+
# 6. Prepare run messages
|
|
1783
|
+
run_messages: RunMessages = await self._aget_run_messages(
|
|
1702
1784
|
run_response=run_response,
|
|
1703
1785
|
input=run_input.input_content,
|
|
1704
|
-
session=
|
|
1786
|
+
session=agent_session,
|
|
1705
1787
|
session_state=session_state,
|
|
1706
1788
|
user_id=user_id,
|
|
1707
1789
|
audio=run_input.audios,
|
|
@@ -1716,29 +1798,23 @@ class Agent:
|
|
|
1716
1798
|
metadata=metadata,
|
|
1717
1799
|
**kwargs,
|
|
1718
1800
|
)
|
|
1719
|
-
|
|
1720
|
-
|
|
1801
|
+
if len(run_messages.messages) == 0:
|
|
1802
|
+
log_error("No messages to be sent to the model.")
|
|
1721
1803
|
|
|
1722
1804
|
# Register run for cancellation tracking
|
|
1723
1805
|
register_run(run_response.run_id) # type: ignore
|
|
1724
1806
|
|
|
1725
1807
|
try:
|
|
1726
|
-
#
|
|
1727
|
-
if stream_intermediate_steps:
|
|
1728
|
-
yield self._handle_event(create_run_started_event(run_response), run_response)
|
|
1729
|
-
|
|
1730
|
-
# 4. Reason about the task if reasoning is enabled
|
|
1808
|
+
# 7. Reason about the task if reasoning is enabled
|
|
1731
1809
|
async for item in self._ahandle_reasoning_stream(run_response=run_response, run_messages=run_messages):
|
|
1732
1810
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
1733
1811
|
yield item
|
|
1734
|
-
|
|
1735
|
-
# Check for cancellation before model processing
|
|
1736
1812
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
1737
1813
|
|
|
1738
|
-
#
|
|
1814
|
+
# 8. Generate a response from the Model
|
|
1739
1815
|
if self.output_model is None:
|
|
1740
1816
|
async for event in self._ahandle_model_response_stream(
|
|
1741
|
-
session=
|
|
1817
|
+
session=agent_session,
|
|
1742
1818
|
run_response=run_response,
|
|
1743
1819
|
run_messages=run_messages,
|
|
1744
1820
|
response_format=response_format,
|
|
@@ -1753,7 +1829,7 @@ class Agent:
|
|
|
1753
1829
|
) # type: ignore
|
|
1754
1830
|
|
|
1755
1831
|
async for event in self._ahandle_model_response_stream(
|
|
1756
|
-
session=
|
|
1832
|
+
session=agent_session,
|
|
1757
1833
|
run_response=run_response,
|
|
1758
1834
|
run_messages=run_messages,
|
|
1759
1835
|
response_format=response_format,
|
|
@@ -1771,7 +1847,7 @@ class Agent:
|
|
|
1771
1847
|
|
|
1772
1848
|
# If an output model is provided, generate output using the output model
|
|
1773
1849
|
async for event in self._agenerate_response_with_output_model_stream(
|
|
1774
|
-
session=
|
|
1850
|
+
session=agent_session,
|
|
1775
1851
|
run_response=run_response,
|
|
1776
1852
|
run_messages=run_messages,
|
|
1777
1853
|
stream_intermediate_steps=stream_intermediate_steps,
|
|
@@ -1784,51 +1860,59 @@ class Agent:
|
|
|
1784
1860
|
|
|
1785
1861
|
# If a parser model is provided, structure the response separately
|
|
1786
1862
|
async for event in self._aparse_response_with_parser_model_stream(
|
|
1787
|
-
session=
|
|
1863
|
+
session=agent_session, run_response=run_response, stream_intermediate_steps=stream_intermediate_steps
|
|
1788
1864
|
):
|
|
1789
1865
|
yield event
|
|
1790
1866
|
|
|
1791
|
-
#
|
|
1867
|
+
# Break out of the run function if a tool call is paused
|
|
1792
1868
|
if any(tool_call.is_paused for tool_call in run_response.tools or []):
|
|
1793
1869
|
for item in self._handle_agent_run_paused_stream(
|
|
1794
|
-
run_response=run_response, run_messages=run_messages, session=
|
|
1870
|
+
run_response=run_response, run_messages=run_messages, session=agent_session, user_id=user_id
|
|
1795
1871
|
):
|
|
1796
1872
|
yield item
|
|
1797
1873
|
return
|
|
1798
1874
|
|
|
1875
|
+
# Set the run status to completed
|
|
1799
1876
|
run_response.status = RunStatus.completed
|
|
1800
1877
|
|
|
1801
1878
|
# Set the run duration
|
|
1802
1879
|
if run_response.metrics:
|
|
1803
1880
|
run_response.metrics.stop_timer()
|
|
1804
1881
|
|
|
1805
|
-
#
|
|
1806
|
-
self._update_session_metrics(session=
|
|
1882
|
+
# 9. Calculate session metrics
|
|
1883
|
+
self._update_session_metrics(session=agent_session, run_response=run_response)
|
|
1807
1884
|
|
|
1808
1885
|
# Optional: Save output to file if save_response_to_file is set
|
|
1809
1886
|
self.save_run_response_to_file(
|
|
1810
1887
|
run_response=run_response,
|
|
1811
1888
|
input=run_messages.user_message,
|
|
1812
|
-
session_id=
|
|
1889
|
+
session_id=agent_session.session_id,
|
|
1813
1890
|
user_id=user_id,
|
|
1814
1891
|
)
|
|
1815
1892
|
|
|
1816
|
-
#
|
|
1817
|
-
|
|
1893
|
+
# 10. Add RunOutput to Agent Session
|
|
1894
|
+
agent_session.upsert_run(run=run_response)
|
|
1818
1895
|
|
|
1819
|
-
#
|
|
1896
|
+
# 11. Update Agent Memory
|
|
1820
1897
|
async for event in self._amake_memories_and_summaries(
|
|
1821
|
-
run_response=run_response, run_messages=run_messages, session=
|
|
1898
|
+
run_response=run_response, run_messages=run_messages, session=agent_session, user_id=user_id
|
|
1822
1899
|
):
|
|
1823
1900
|
yield event
|
|
1824
1901
|
|
|
1825
|
-
#
|
|
1902
|
+
# 12. Create the run completed event
|
|
1826
1903
|
completed_event = self._handle_event(
|
|
1827
1904
|
create_run_completed_event(from_run_response=run_response), run_response
|
|
1828
1905
|
)
|
|
1829
1906
|
|
|
1830
|
-
#
|
|
1831
|
-
self.
|
|
1907
|
+
# 13. Scrub the stored run based on storage flags
|
|
1908
|
+
if self._scrub_run_output_for_storage(run_response):
|
|
1909
|
+
agent_session.upsert_run(run=run_response)
|
|
1910
|
+
|
|
1911
|
+
# 14. Save session to storage
|
|
1912
|
+
if self._has_async_db():
|
|
1913
|
+
await self.asave_session(session=agent_session)
|
|
1914
|
+
else:
|
|
1915
|
+
self.save_session(session=agent_session)
|
|
1832
1916
|
|
|
1833
1917
|
if stream_intermediate_steps:
|
|
1834
1918
|
yield completed_event
|
|
@@ -1837,7 +1921,7 @@ class Agent:
|
|
|
1837
1921
|
yield run_response
|
|
1838
1922
|
|
|
1839
1923
|
# Log Agent Telemetry
|
|
1840
|
-
await self._alog_agent_telemetry(session_id=
|
|
1924
|
+
await self._alog_agent_telemetry(session_id=agent_session.session_id, run_id=run_response.run_id)
|
|
1841
1925
|
|
|
1842
1926
|
log_debug(f"Agent Run End: {run_response.run_id}", center=True, symbol="*")
|
|
1843
1927
|
|
|
@@ -1854,8 +1938,11 @@ class Agent:
|
|
|
1854
1938
|
)
|
|
1855
1939
|
|
|
1856
1940
|
# Add the RunOutput to Agent Session even when cancelled
|
|
1857
|
-
|
|
1858
|
-
self.
|
|
1941
|
+
agent_session.upsert_run(run=run_response)
|
|
1942
|
+
if self._has_async_db():
|
|
1943
|
+
await self.asave_session(session=agent_session)
|
|
1944
|
+
else:
|
|
1945
|
+
self.save_session(session=agent_session)
|
|
1859
1946
|
finally:
|
|
1860
1947
|
# Always clean up the run tracking
|
|
1861
1948
|
cleanup_run(run_response.run_id) # type: ignore
|
|
@@ -1939,7 +2026,7 @@ class Agent:
|
|
|
1939
2026
|
# Create a run_id for this specific run
|
|
1940
2027
|
run_id = str(uuid4())
|
|
1941
2028
|
|
|
1942
|
-
# Validate input against input_schema if provided
|
|
2029
|
+
# 2. Validate input against input_schema if provided
|
|
1943
2030
|
validated_input = self._validate_input(input)
|
|
1944
2031
|
|
|
1945
2032
|
# Normalise hook & guardails
|
|
@@ -1950,6 +2037,7 @@ class Agent:
|
|
|
1950
2037
|
self.post_hooks = normalize_hooks(self.post_hooks, async_mode=True)
|
|
1951
2038
|
self._hooks_normalised = True
|
|
1952
2039
|
|
|
2040
|
+
# Initialize session
|
|
1953
2041
|
session_id, user_id, session_state = self._initialize_session(
|
|
1954
2042
|
run_id=run_id, session_id=session_id, user_id=user_id, session_state=session_state
|
|
1955
2043
|
)
|
|
@@ -1961,25 +2049,8 @@ class Agent:
|
|
|
1961
2049
|
images=images, videos=videos, audios=audio, files=files
|
|
1962
2050
|
)
|
|
1963
2051
|
|
|
1964
|
-
#
|
|
1965
|
-
run_input = RunInput(
|
|
1966
|
-
input_content=validated_input,
|
|
1967
|
-
images=image_artifacts,
|
|
1968
|
-
videos=video_artifacts,
|
|
1969
|
-
audios=audio_artifacts,
|
|
1970
|
-
files=file_artifacts,
|
|
1971
|
-
)
|
|
1972
|
-
|
|
1973
|
-
# Read existing session from storage
|
|
1974
|
-
agent_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
|
|
1975
|
-
self._update_metadata(session=agent_session)
|
|
1976
|
-
|
|
1977
|
-
# Update session state from DB
|
|
1978
|
-
session_state = self._load_session_state(session=agent_session, session_state=session_state)
|
|
1979
|
-
|
|
1980
|
-
# Determine run dependencies
|
|
2052
|
+
# Resolve variables
|
|
1981
2053
|
run_dependencies = dependencies if dependencies is not None else self.dependencies
|
|
1982
|
-
|
|
1983
2054
|
add_dependencies = (
|
|
1984
2055
|
add_dependencies_to_context if add_dependencies_to_context is not None else self.add_dependencies_to_context
|
|
1985
2056
|
)
|
|
@@ -1990,10 +2061,14 @@ class Agent:
|
|
|
1990
2061
|
)
|
|
1991
2062
|
add_history = add_history_to_context if add_history_to_context is not None else self.add_history_to_context
|
|
1992
2063
|
|
|
1993
|
-
|
|
1994
|
-
|
|
1995
|
-
|
|
1996
|
-
|
|
2064
|
+
# Create RunInput to capture the original user input
|
|
2065
|
+
run_input = RunInput(
|
|
2066
|
+
input_content=validated_input,
|
|
2067
|
+
images=image_artifacts,
|
|
2068
|
+
videos=video_artifacts,
|
|
2069
|
+
audios=audio_artifacts,
|
|
2070
|
+
files=files,
|
|
2071
|
+
)
|
|
1997
2072
|
|
|
1998
2073
|
# Use stream override value when necessary
|
|
1999
2074
|
if stream is None:
|
|
@@ -2015,6 +2090,11 @@ class Agent:
|
|
|
2015
2090
|
response_format = self._get_response_format() if self.parser_model is None else None
|
|
2016
2091
|
self.model = cast(Model, self.model)
|
|
2017
2092
|
|
|
2093
|
+
# Get knowledge filters
|
|
2094
|
+
effective_filters = knowledge_filters
|
|
2095
|
+
if self.knowledge_filters or knowledge_filters:
|
|
2096
|
+
effective_filters = self._get_effective_filters(knowledge_filters)
|
|
2097
|
+
|
|
2018
2098
|
# Merge agent metadata with run metadata
|
|
2019
2099
|
if self.metadata is not None:
|
|
2020
2100
|
if metadata is None:
|
|
@@ -2052,18 +2132,18 @@ class Agent:
|
|
|
2052
2132
|
if stream:
|
|
2053
2133
|
return self._arun_stream( # type: ignore
|
|
2054
2134
|
run_response=run_response,
|
|
2055
|
-
session=agent_session,
|
|
2056
2135
|
user_id=user_id,
|
|
2136
|
+
response_format=response_format,
|
|
2137
|
+
stream_intermediate_steps=stream_intermediate_steps,
|
|
2138
|
+
yield_run_response=yield_run_response,
|
|
2139
|
+
dependencies=run_dependencies,
|
|
2140
|
+
session_id=session_id,
|
|
2057
2141
|
session_state=session_state,
|
|
2058
2142
|
knowledge_filters=effective_filters,
|
|
2059
2143
|
add_history_to_context=add_history,
|
|
2060
2144
|
add_dependencies_to_context=add_dependencies,
|
|
2061
2145
|
add_session_state_to_context=add_session_state,
|
|
2062
2146
|
metadata=metadata,
|
|
2063
|
-
response_format=response_format,
|
|
2064
|
-
stream_intermediate_steps=stream_intermediate_steps,
|
|
2065
|
-
yield_run_response=yield_run_response,
|
|
2066
|
-
dependencies=run_dependencies,
|
|
2067
2147
|
debug_mode=debug_mode,
|
|
2068
2148
|
**kwargs,
|
|
2069
2149
|
) # type: ignore[assignment]
|
|
@@ -2071,17 +2151,15 @@ class Agent:
|
|
|
2071
2151
|
return self._arun( # type: ignore
|
|
2072
2152
|
run_response=run_response,
|
|
2073
2153
|
user_id=user_id,
|
|
2074
|
-
|
|
2154
|
+
response_format=response_format,
|
|
2155
|
+
dependencies=run_dependencies,
|
|
2156
|
+
session_id=session_id,
|
|
2075
2157
|
session_state=session_state,
|
|
2076
|
-
knowledge_filters=
|
|
2158
|
+
knowledge_filters=effective_filters,
|
|
2077
2159
|
add_history_to_context=add_history,
|
|
2078
2160
|
add_dependencies_to_context=add_dependencies,
|
|
2079
2161
|
add_session_state_to_context=add_session_state,
|
|
2080
2162
|
metadata=metadata,
|
|
2081
|
-
response_format=response_format,
|
|
2082
|
-
stream_intermediate_steps=stream_intermediate_steps,
|
|
2083
|
-
yield_run_response=yield_run_response,
|
|
2084
|
-
dependencies=run_dependencies,
|
|
2085
2163
|
debug_mode=debug_mode,
|
|
2086
2164
|
**kwargs,
|
|
2087
2165
|
)
|
|
@@ -2102,17 +2180,6 @@ class Agent:
|
|
|
2102
2180
|
import time
|
|
2103
2181
|
|
|
2104
2182
|
time.sleep(delay)
|
|
2105
|
-
except RunCancelledException as e:
|
|
2106
|
-
# Handle run cancellation
|
|
2107
|
-
log_info(f"Run {run_response.run_id} was cancelled")
|
|
2108
|
-
run_response.content = str(e)
|
|
2109
|
-
run_response.status = RunStatus.cancelled
|
|
2110
|
-
|
|
2111
|
-
# Add the RunOutput to Agent Session even when cancelled
|
|
2112
|
-
agent_session.upsert_run(run=run_response)
|
|
2113
|
-
self.save_session(session=agent_session)
|
|
2114
|
-
|
|
2115
|
-
return run_response
|
|
2116
2183
|
except KeyboardInterrupt:
|
|
2117
2184
|
run_response.content = "Operation cancelled by user"
|
|
2118
2185
|
run_response.status = RunStatus.cancelled
|
|
@@ -2209,6 +2276,9 @@ class Agent:
|
|
|
2209
2276
|
if run_response is None and (run_id is not None and (session_id is None and self.session_id is None)):
|
|
2210
2277
|
raise ValueError("Session ID is required to continue a run from a run_id.")
|
|
2211
2278
|
|
|
2279
|
+
if self._has_async_db():
|
|
2280
|
+
raise Exception("continue_run() is not supported with an async DB. Please use acontinue_arun() instead.")
|
|
2281
|
+
|
|
2212
2282
|
session_id = run_response.session_id if run_response else session_id
|
|
2213
2283
|
|
|
2214
2284
|
session_id, user_id, session_state = self._initialize_session(
|
|
@@ -2589,6 +2659,7 @@ class Agent:
|
|
|
2589
2659
|
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
2590
2660
|
dependencies: Optional[Dict[str, Any]] = None,
|
|
2591
2661
|
debug_mode: Optional[bool] = None,
|
|
2662
|
+
yield_run_response: bool = False,
|
|
2592
2663
|
**kwargs,
|
|
2593
2664
|
) -> Union[RunOutput, AsyncIterator[Union[RunOutputEvent, RunOutput]]]:
|
|
2594
2665
|
"""Continue a previous run.
|
|
@@ -2621,21 +2692,8 @@ class Agent:
|
|
|
2621
2692
|
# Initialize the Agent
|
|
2622
2693
|
self.initialize_agent(debug_mode=debug_mode)
|
|
2623
2694
|
|
|
2624
|
-
# Read existing session from storage
|
|
2625
|
-
agent_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
|
|
2626
|
-
self._update_metadata(session=agent_session)
|
|
2627
|
-
|
|
2628
|
-
# Update session state from DB
|
|
2629
|
-
session_state = self._load_session_state(session=agent_session, session_state=session_state)
|
|
2630
|
-
|
|
2631
2695
|
run_dependencies = dependencies if dependencies is not None else self.dependencies
|
|
2632
2696
|
|
|
2633
|
-
effective_filters = knowledge_filters
|
|
2634
|
-
|
|
2635
|
-
# When filters are passed manually
|
|
2636
|
-
if self.knowledge_filters or knowledge_filters:
|
|
2637
|
-
effective_filters = self._get_effective_filters(knowledge_filters)
|
|
2638
|
-
|
|
2639
2697
|
# If no retries are set, use the agent's default retries
|
|
2640
2698
|
retries = retries if retries is not None else self.retries
|
|
2641
2699
|
|
|
@@ -2655,70 +2713,42 @@ class Agent:
|
|
|
2655
2713
|
self.stream = self.stream or stream
|
|
2656
2714
|
self.stream_intermediate_steps = self.stream_intermediate_steps or (stream_intermediate_steps and self.stream)
|
|
2657
2715
|
|
|
2658
|
-
#
|
|
2659
|
-
|
|
2660
|
-
|
|
2661
|
-
|
|
2662
|
-
elif run_id is not None:
|
|
2663
|
-
# The run is continued from a run_id. This requires the updated tools to be passed.
|
|
2664
|
-
if updated_tools is None:
|
|
2665
|
-
raise ValueError("Updated tools are required to continue a run from a run_id.")
|
|
2666
|
-
|
|
2667
|
-
runs = agent_session.runs
|
|
2668
|
-
run_response = next((r for r in runs if r.run_id == run_id), None) # type: ignore
|
|
2669
|
-
if run_response is None:
|
|
2670
|
-
raise RuntimeError(f"No runs found for run ID {run_id}")
|
|
2671
|
-
run_response.tools = updated_tools
|
|
2672
|
-
input = run_response.messages or []
|
|
2673
|
-
else:
|
|
2674
|
-
raise ValueError("Either run_response or run_id must be provided.")
|
|
2716
|
+
# Get knowledge filters
|
|
2717
|
+
effective_filters = knowledge_filters
|
|
2718
|
+
if self.knowledge_filters or knowledge_filters:
|
|
2719
|
+
effective_filters = self._get_effective_filters(knowledge_filters)
|
|
2675
2720
|
|
|
2676
2721
|
# Prepare arguments for the model
|
|
2677
2722
|
response_format = self._get_response_format()
|
|
2678
2723
|
self.model = cast(Model, self.model)
|
|
2679
2724
|
|
|
2680
|
-
self._determine_tools_for_model(
|
|
2681
|
-
model=self.model,
|
|
2682
|
-
run_response=run_response,
|
|
2683
|
-
session=agent_session,
|
|
2684
|
-
session_state=session_state,
|
|
2685
|
-
user_id=user_id,
|
|
2686
|
-
async_mode=True,
|
|
2687
|
-
knowledge_filters=effective_filters,
|
|
2688
|
-
)
|
|
2689
|
-
|
|
2690
2725
|
last_exception = None
|
|
2691
2726
|
num_attempts = retries + 1
|
|
2692
2727
|
for attempt in range(num_attempts):
|
|
2693
|
-
run_response = cast(RunOutput, run_response)
|
|
2694
|
-
|
|
2695
|
-
log_debug(f"Agent Run Start: {run_response.run_id}", center=True)
|
|
2696
|
-
|
|
2697
|
-
# Prepare run messages
|
|
2698
|
-
run_messages: RunMessages = self._get_continue_run_messages(
|
|
2699
|
-
input=input,
|
|
2700
|
-
)
|
|
2701
|
-
|
|
2702
|
-
# Reset the run paused state
|
|
2703
|
-
run_response.status = RunStatus.running
|
|
2704
|
-
|
|
2705
2728
|
try:
|
|
2706
2729
|
if stream:
|
|
2707
2730
|
return self._acontinue_run_stream(
|
|
2708
2731
|
run_response=run_response,
|
|
2709
|
-
|
|
2732
|
+
updated_tools=updated_tools,
|
|
2733
|
+
knowledge_filters=effective_filters,
|
|
2734
|
+
session_state=session_state,
|
|
2735
|
+
run_id=run_id,
|
|
2710
2736
|
user_id=user_id,
|
|
2711
|
-
|
|
2737
|
+
session_id=session_id,
|
|
2712
2738
|
response_format=response_format,
|
|
2713
|
-
stream_intermediate_steps=stream_intermediate_steps,
|
|
2714
2739
|
dependencies=run_dependencies,
|
|
2740
|
+
stream_intermediate_steps=stream_intermediate_steps,
|
|
2741
|
+
yield_run_response=yield_run_response,
|
|
2715
2742
|
)
|
|
2716
2743
|
else:
|
|
2717
2744
|
return self._acontinue_run( # type: ignore
|
|
2745
|
+
session_id=session_id,
|
|
2718
2746
|
run_response=run_response,
|
|
2719
|
-
|
|
2747
|
+
updated_tools=updated_tools,
|
|
2748
|
+
knowledge_filters=effective_filters,
|
|
2749
|
+
session_state=session_state,
|
|
2750
|
+
run_id=run_id,
|
|
2720
2751
|
user_id=user_id,
|
|
2721
|
-
session=agent_session,
|
|
2722
2752
|
response_format=response_format,
|
|
2723
2753
|
dependencies=run_dependencies,
|
|
2724
2754
|
debug_mode=debug_mode,
|
|
@@ -2738,6 +2768,7 @@ class Agent:
|
|
|
2738
2768
|
|
|
2739
2769
|
time.sleep(delay)
|
|
2740
2770
|
except KeyboardInterrupt:
|
|
2771
|
+
run_response = cast(RunOutput, run_response)
|
|
2741
2772
|
if stream:
|
|
2742
2773
|
return async_generator_wrapper( # type: ignore
|
|
2743
2774
|
create_run_cancelled_event(run_response, "Operation cancelled by user")
|
|
@@ -2762,9 +2793,12 @@ class Agent:
|
|
|
2762
2793
|
|
|
2763
2794
|
async def _acontinue_run(
|
|
2764
2795
|
self,
|
|
2765
|
-
|
|
2766
|
-
|
|
2767
|
-
|
|
2796
|
+
session_id: str,
|
|
2797
|
+
run_response: Optional[RunOutput] = None,
|
|
2798
|
+
updated_tools: Optional[List[ToolExecution]] = None,
|
|
2799
|
+
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
2800
|
+
session_state: Optional[Dict[str, Any]] = None,
|
|
2801
|
+
run_id: Optional[str] = None,
|
|
2768
2802
|
user_id: Optional[str] = None,
|
|
2769
2803
|
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
|
|
2770
2804
|
dependencies: Optional[Dict[str, Any]] = None,
|
|
@@ -2774,175 +2808,408 @@ class Agent:
|
|
|
2774
2808
|
"""Continue a previous run.
|
|
2775
2809
|
|
|
2776
2810
|
Steps:
|
|
2777
|
-
1.
|
|
2778
|
-
2.
|
|
2779
|
-
3.
|
|
2780
|
-
4.
|
|
2781
|
-
5.
|
|
2782
|
-
6.
|
|
2783
|
-
7.
|
|
2811
|
+
1. Read existing session from db
|
|
2812
|
+
2. Resolve dependencies
|
|
2813
|
+
3. Update metadata and session state
|
|
2814
|
+
4. Prepare run response
|
|
2815
|
+
5. Determine tools for model
|
|
2816
|
+
6. Prepare run messages
|
|
2817
|
+
7. Handle the updated tools
|
|
2818
|
+
8. Get model response
|
|
2819
|
+
9. Update the RunOutput with the model response
|
|
2820
|
+
10. Calculate session metrics
|
|
2821
|
+
11. Execute post-hooks
|
|
2822
|
+
12. Update Agent Memory
|
|
2823
|
+
13. Save session to storage
|
|
2784
2824
|
"""
|
|
2785
|
-
|
|
2825
|
+
log_debug(f"Agent Run Continue: {run_response.run_id}", center=True) # type: ignore
|
|
2826
|
+
|
|
2827
|
+
# 1. Read existing session from db
|
|
2828
|
+
if self._has_async_db():
|
|
2829
|
+
agent_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)
|
|
2830
|
+
else:
|
|
2831
|
+
agent_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
|
|
2832
|
+
|
|
2833
|
+
# 2. Resolve dependencies
|
|
2786
2834
|
if dependencies is not None:
|
|
2787
|
-
await self._aresolve_run_dependencies(dependencies)
|
|
2835
|
+
await self._aresolve_run_dependencies(dependencies=dependencies)
|
|
2788
2836
|
|
|
2789
|
-
|
|
2837
|
+
# 3. Update metadata and session state
|
|
2838
|
+
self._update_metadata(session=agent_session)
|
|
2839
|
+
if session_state is not None:
|
|
2840
|
+
session_state = self._load_session_state(session=agent_session, session_state=session_state)
|
|
2790
2841
|
|
|
2791
|
-
#
|
|
2792
|
-
|
|
2842
|
+
# 4. Prepare run response
|
|
2843
|
+
if run_response is not None:
|
|
2844
|
+
# The run is continued from a provided run_response. This contains the updated tools.
|
|
2845
|
+
input = run_response.messages or []
|
|
2846
|
+
elif run_id is not None:
|
|
2847
|
+
# The run is continued from a run_id. This requires the updated tools to be passed.
|
|
2848
|
+
if updated_tools is None:
|
|
2849
|
+
raise ValueError("Updated tools are required to continue a run from a run_id.")
|
|
2793
2850
|
|
|
2794
|
-
|
|
2795
|
-
|
|
2796
|
-
|
|
2797
|
-
|
|
2798
|
-
tools=
|
|
2799
|
-
|
|
2800
|
-
|
|
2801
|
-
|
|
2851
|
+
runs = agent_session.runs
|
|
2852
|
+
run_response = next((r for r in runs if r.run_id == run_id), None) # type: ignore
|
|
2853
|
+
if run_response is None:
|
|
2854
|
+
raise RuntimeError(f"No runs found for run ID {run_id}")
|
|
2855
|
+
run_response.tools = updated_tools
|
|
2856
|
+
input = run_response.messages or []
|
|
2857
|
+
else:
|
|
2858
|
+
raise ValueError("Either run_response or run_id must be provided.")
|
|
2859
|
+
|
|
2860
|
+
run_response = cast(RunOutput, run_response)
|
|
2861
|
+
run_response.status = RunStatus.running
|
|
2862
|
+
|
|
2863
|
+
# 5. Determine tools for model
|
|
2864
|
+
self.model = cast(Model, self.model)
|
|
2865
|
+
await self._adetermine_tools_for_model(
|
|
2866
|
+
model=self.model,
|
|
2867
|
+
run_response=run_response,
|
|
2868
|
+
session=agent_session,
|
|
2869
|
+
session_state=session_state,
|
|
2870
|
+
dependencies=dependencies,
|
|
2871
|
+
user_id=user_id,
|
|
2872
|
+
async_mode=True,
|
|
2873
|
+
knowledge_filters=knowledge_filters,
|
|
2802
2874
|
)
|
|
2803
2875
|
|
|
2804
|
-
|
|
2876
|
+
# 6. Prepare run messages
|
|
2877
|
+
run_messages: RunMessages = self._get_continue_run_messages(
|
|
2878
|
+
input=input,
|
|
2879
|
+
)
|
|
2805
2880
|
|
|
2806
|
-
#
|
|
2807
|
-
|
|
2808
|
-
|
|
2809
|
-
|
|
2881
|
+
# Register run for cancellation tracking
|
|
2882
|
+
register_run(run_response.run_id) # type: ignore
|
|
2883
|
+
|
|
2884
|
+
try:
|
|
2885
|
+
# 7. Handle the updated tools
|
|
2886
|
+
await self._ahandle_tool_call_updates(run_response=run_response, run_messages=run_messages)
|
|
2887
|
+
|
|
2888
|
+
# 8. Get model response
|
|
2889
|
+
model_response: ModelResponse = await self.model.aresponse(
|
|
2890
|
+
messages=run_messages.messages,
|
|
2891
|
+
response_format=response_format,
|
|
2892
|
+
tools=self._tools_for_model,
|
|
2893
|
+
functions=self._functions_for_model,
|
|
2894
|
+
tool_choice=self.tool_choice,
|
|
2895
|
+
tool_call_limit=self.tool_call_limit,
|
|
2810
2896
|
)
|
|
2897
|
+
# Check for cancellation after model call
|
|
2898
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
2811
2899
|
|
|
2812
|
-
|
|
2813
|
-
|
|
2900
|
+
# If an output model is provided, generate output using the output model
|
|
2901
|
+
await self._agenerate_response_with_output_model(model_response=model_response, run_messages=run_messages)
|
|
2814
2902
|
|
|
2815
|
-
|
|
2903
|
+
# If a parser model is provided, structure the response separately
|
|
2904
|
+
await self._aparse_response_with_parser_model(model_response=model_response, run_messages=run_messages)
|
|
2816
2905
|
|
|
2817
|
-
|
|
2818
|
-
|
|
2906
|
+
# 9. Update the RunOutput with the model response
|
|
2907
|
+
self._update_run_response(
|
|
2908
|
+
model_response=model_response, run_response=run_response, run_messages=run_messages
|
|
2909
|
+
)
|
|
2819
2910
|
|
|
2820
|
-
|
|
2821
|
-
|
|
2822
|
-
|
|
2911
|
+
if self.store_media:
|
|
2912
|
+
self._store_media(run_response, model_response)
|
|
2913
|
+
else:
|
|
2914
|
+
self._scrub_media_from_run_output(run_response)
|
|
2823
2915
|
|
|
2824
|
-
|
|
2825
|
-
|
|
2826
|
-
|
|
2827
|
-
|
|
2828
|
-
|
|
2916
|
+
# Break out of the run function if a tool call is paused
|
|
2917
|
+
if any(tool_call.is_paused for tool_call in run_response.tools or []):
|
|
2918
|
+
return self._handle_agent_run_paused(
|
|
2919
|
+
run_response=run_response, run_messages=run_messages, session=agent_session, user_id=user_id
|
|
2920
|
+
)
|
|
2921
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
2922
|
+
|
|
2923
|
+
# 10. Calculate session metrics
|
|
2924
|
+
self._update_session_metrics(session=agent_session, run_response=run_response)
|
|
2925
|
+
|
|
2926
|
+
run_response.status = RunStatus.completed
|
|
2927
|
+
|
|
2928
|
+
# 11. Execute post-hooks
|
|
2929
|
+
if self.post_hooks is not None:
|
|
2930
|
+
await self._aexecute_post_hooks(
|
|
2931
|
+
hooks=self.post_hooks, # type: ignore
|
|
2932
|
+
run_output=run_response,
|
|
2933
|
+
session=agent_session,
|
|
2934
|
+
user_id=user_id,
|
|
2935
|
+
debug_mode=debug_mode,
|
|
2936
|
+
**kwargs,
|
|
2937
|
+
)
|
|
2938
|
+
|
|
2939
|
+
# Convert the response to the structured format if needed
|
|
2940
|
+
self._convert_response_to_structured_format(run_response)
|
|
2941
|
+
|
|
2942
|
+
if run_response.metrics:
|
|
2943
|
+
run_response.metrics.stop_timer()
|
|
2944
|
+
|
|
2945
|
+
# 12. Update Agent Memory
|
|
2946
|
+
async for _ in self._amake_memories_and_summaries(
|
|
2947
|
+
run_response=run_response, run_messages=run_messages, session=agent_session, user_id=user_id
|
|
2948
|
+
):
|
|
2949
|
+
pass
|
|
2950
|
+
|
|
2951
|
+
# Optional: Save output to file if save_response_to_file is set
|
|
2952
|
+
self.save_run_response_to_file(
|
|
2953
|
+
run_response=run_response,
|
|
2954
|
+
input=run_messages.user_message,
|
|
2955
|
+
session_id=agent_session.session_id,
|
|
2829
2956
|
user_id=user_id,
|
|
2830
|
-
debug_mode=debug_mode,
|
|
2831
|
-
**kwargs,
|
|
2832
2957
|
)
|
|
2833
2958
|
|
|
2834
|
-
|
|
2835
|
-
self.save_run_response_to_file(
|
|
2836
|
-
run_response=run_response, input=run_messages.user_message, session_id=session.session_id, user_id=user_id
|
|
2837
|
-
)
|
|
2959
|
+
agent_session.upsert_run(run=run_response)
|
|
2838
2960
|
|
|
2839
|
-
|
|
2840
|
-
|
|
2961
|
+
# 13. Save session to storage
|
|
2962
|
+
if self._has_async_db():
|
|
2963
|
+
await self.asave_session(session=agent_session)
|
|
2964
|
+
else:
|
|
2965
|
+
self.save_session(session=agent_session)
|
|
2841
2966
|
|
|
2842
|
-
|
|
2843
|
-
|
|
2844
|
-
run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
|
|
2845
|
-
):
|
|
2846
|
-
pass
|
|
2967
|
+
# Log Agent Telemetry
|
|
2968
|
+
await self._alog_agent_telemetry(session_id=agent_session.session_id, run_id=run_response.run_id)
|
|
2847
2969
|
|
|
2848
|
-
|
|
2849
|
-
self.save_session(session=session)
|
|
2970
|
+
log_debug(f"Agent Run End: {run_response.run_id}", center=True, symbol="*")
|
|
2850
2971
|
|
|
2851
|
-
|
|
2852
|
-
await self._alog_agent_telemetry(session_id=session.session_id, run_id=run_response.run_id)
|
|
2972
|
+
return run_response
|
|
2853
2973
|
|
|
2854
|
-
|
|
2974
|
+
except RunCancelledException as e:
|
|
2975
|
+
# Handle run cancellation
|
|
2976
|
+
log_info(f"Run {run_response.run_id} was cancelled")
|
|
2977
|
+
run_response.content = str(e)
|
|
2978
|
+
run_response.status = RunStatus.cancelled
|
|
2855
2979
|
|
|
2856
|
-
|
|
2980
|
+
# Update the Agent Session before exiting
|
|
2981
|
+
agent_session.upsert_run(run=run_response)
|
|
2982
|
+
if self._has_async_db():
|
|
2983
|
+
await self.asave_session(session=agent_session)
|
|
2984
|
+
else:
|
|
2985
|
+
self.save_session(session=agent_session)
|
|
2986
|
+
|
|
2987
|
+
return run_response
|
|
2988
|
+
finally:
|
|
2989
|
+
# Always clean up the run tracking
|
|
2990
|
+
cleanup_run(run_response.run_id) # type: ignore
|
|
2857
2991
|
|
|
2858
2992
|
async def _acontinue_run_stream(
|
|
2859
2993
|
self,
|
|
2860
|
-
|
|
2861
|
-
|
|
2862
|
-
|
|
2994
|
+
session_id: str,
|
|
2995
|
+
run_response: Optional[RunOutput] = None,
|
|
2996
|
+
updated_tools: Optional[List[ToolExecution]] = None,
|
|
2997
|
+
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
2998
|
+
session_state: Optional[Dict[str, Any]] = None,
|
|
2999
|
+
run_id: Optional[str] = None,
|
|
2863
3000
|
user_id: Optional[str] = None,
|
|
2864
3001
|
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
|
|
2865
3002
|
stream_intermediate_steps: bool = False,
|
|
3003
|
+
yield_run_response: Optional[bool] = None,
|
|
2866
3004
|
dependencies: Optional[Dict[str, Any]] = None,
|
|
2867
3005
|
) -> AsyncIterator[Union[RunOutputEvent, RunOutput]]:
|
|
2868
3006
|
"""Continue a previous run.
|
|
2869
3007
|
|
|
2870
3008
|
Steps:
|
|
2871
|
-
1.
|
|
2872
|
-
2.
|
|
2873
|
-
3.
|
|
2874
|
-
4.
|
|
2875
|
-
5.
|
|
2876
|
-
6.
|
|
2877
|
-
7.
|
|
2878
|
-
8.
|
|
3009
|
+
1. Resolve dependencies
|
|
3010
|
+
2. Read existing session from db
|
|
3011
|
+
3. Update session state and metadata
|
|
3012
|
+
4. Prepare run response
|
|
3013
|
+
5. Determine tools for model
|
|
3014
|
+
6. Prepare run messages
|
|
3015
|
+
7. Handle the updated tools
|
|
3016
|
+
8. Process model response
|
|
3017
|
+
9. Add the run to memory
|
|
3018
|
+
10. Update Agent Memory
|
|
3019
|
+
11. Calculate session metrics
|
|
3020
|
+
12. Create the run completed event
|
|
3021
|
+
13. Add the RunOutput to Agent Session
|
|
3022
|
+
14. Save session to storage
|
|
2879
3023
|
"""
|
|
2880
|
-
#
|
|
3024
|
+
log_debug(f"Agent Run Continue: {run_response.run_id}", center=True) # type: ignore
|
|
3025
|
+
|
|
3026
|
+
# 1. Resolve dependencies
|
|
2881
3027
|
if dependencies is not None:
|
|
2882
3028
|
await self._aresolve_run_dependencies(dependencies=dependencies)
|
|
2883
3029
|
|
|
2884
|
-
#
|
|
2885
|
-
|
|
2886
|
-
yield self._handle_event(create_run_continued_event(run_response), run_response)
|
|
2887
|
-
|
|
2888
|
-
# 1. Handle the updated tools
|
|
2889
|
-
async for event in self._ahandle_tool_call_updates_stream(run_response=run_response, run_messages=run_messages):
|
|
2890
|
-
yield event
|
|
2891
|
-
|
|
2892
|
-
# 2. Process model response
|
|
2893
|
-
async for event in self._ahandle_model_response_stream(
|
|
2894
|
-
session=session,
|
|
2895
|
-
run_response=run_response,
|
|
2896
|
-
run_messages=run_messages,
|
|
2897
|
-
response_format=response_format,
|
|
2898
|
-
stream_intermediate_steps=stream_intermediate_steps,
|
|
2899
|
-
):
|
|
2900
|
-
yield event
|
|
3030
|
+
# 2. Read existing session from db
|
|
3031
|
+
agent_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
|
|
2901
3032
|
|
|
2902
|
-
#
|
|
2903
|
-
|
|
2904
|
-
|
|
2905
|
-
|
|
2906
|
-
):
|
|
2907
|
-
yield item
|
|
2908
|
-
return
|
|
3033
|
+
# 3. Update session state and metadata
|
|
3034
|
+
self._update_metadata(session=agent_session)
|
|
3035
|
+
if session_state is not None:
|
|
3036
|
+
session_state = self._load_session_state(session=agent_session, session_state=session_state)
|
|
2909
3037
|
|
|
2910
|
-
#
|
|
2911
|
-
|
|
3038
|
+
# 4. Prepare run response
|
|
3039
|
+
if run_response is not None:
|
|
3040
|
+
# The run is continued from a provided run_response. This contains the updated tools.
|
|
3041
|
+
input = run_response.messages or []
|
|
3042
|
+
elif run_id is not None:
|
|
3043
|
+
# The run is continued from a run_id. This requires the updated tools to be passed.
|
|
3044
|
+
if updated_tools is None:
|
|
3045
|
+
raise ValueError("Updated tools are required to continue a run from a run_id.")
|
|
2912
3046
|
|
|
2913
|
-
|
|
3047
|
+
runs = agent_session.runs
|
|
3048
|
+
run_response = next((r for r in runs if r.run_id == run_id), None) # type: ignore
|
|
3049
|
+
if run_response is None:
|
|
3050
|
+
raise RuntimeError(f"No runs found for run ID {run_id}")
|
|
3051
|
+
run_response.tools = updated_tools
|
|
3052
|
+
input = run_response.messages or []
|
|
3053
|
+
else:
|
|
3054
|
+
raise ValueError("Either run_response or run_id must be provided.")
|
|
2914
3055
|
|
|
2915
|
-
|
|
2916
|
-
|
|
2917
|
-
run_response.metrics.stop_timer()
|
|
3056
|
+
run_response = cast(RunOutput, run_response)
|
|
3057
|
+
run_response.status = RunStatus.running
|
|
2918
3058
|
|
|
2919
|
-
#
|
|
2920
|
-
self.
|
|
2921
|
-
|
|
3059
|
+
# 5. Determine tools for model
|
|
3060
|
+
self.model = cast(Model, self.model)
|
|
3061
|
+
await self._adetermine_tools_for_model(
|
|
3062
|
+
model=self.model,
|
|
3063
|
+
run_response=run_response,
|
|
3064
|
+
session=agent_session,
|
|
3065
|
+
session_state=session_state,
|
|
3066
|
+
dependencies=dependencies,
|
|
3067
|
+
user_id=user_id,
|
|
3068
|
+
async_mode=True,
|
|
3069
|
+
knowledge_filters=knowledge_filters,
|
|
2922
3070
|
)
|
|
2923
3071
|
|
|
2924
|
-
#
|
|
2925
|
-
|
|
3072
|
+
# 6. Prepare run messages
|
|
3073
|
+
run_messages: RunMessages = self._get_continue_run_messages(
|
|
3074
|
+
input=input,
|
|
3075
|
+
)
|
|
2926
3076
|
|
|
2927
|
-
#
|
|
2928
|
-
|
|
2929
|
-
run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
|
|
2930
|
-
):
|
|
2931
|
-
yield event
|
|
3077
|
+
# Register run for cancellation tracking
|
|
3078
|
+
register_run(run_response.run_id) # type: ignore
|
|
2932
3079
|
|
|
2933
|
-
|
|
2934
|
-
|
|
3080
|
+
try:
|
|
3081
|
+
# Start the Run by yielding a RunContinued event
|
|
3082
|
+
if stream_intermediate_steps:
|
|
3083
|
+
yield self._handle_event(create_run_continued_event(run_response), run_response)
|
|
2935
3084
|
|
|
2936
|
-
|
|
2937
|
-
|
|
3085
|
+
# 7. Handle the updated tools
|
|
3086
|
+
async for event in self._ahandle_tool_call_updates_stream(
|
|
3087
|
+
run_response=run_response, run_messages=run_messages
|
|
3088
|
+
):
|
|
3089
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3090
|
+
yield event
|
|
2938
3091
|
|
|
2939
|
-
|
|
2940
|
-
|
|
3092
|
+
# 8. Process model response
|
|
3093
|
+
if self.output_model is None:
|
|
3094
|
+
async for event in self._ahandle_model_response_stream(
|
|
3095
|
+
session=agent_session,
|
|
3096
|
+
run_response=run_response,
|
|
3097
|
+
run_messages=run_messages,
|
|
3098
|
+
response_format=response_format,
|
|
3099
|
+
stream_intermediate_steps=stream_intermediate_steps,
|
|
3100
|
+
):
|
|
3101
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3102
|
+
yield event
|
|
3103
|
+
else:
|
|
3104
|
+
from agno.run.agent import (
|
|
3105
|
+
IntermediateRunContentEvent,
|
|
3106
|
+
RunContentEvent,
|
|
3107
|
+
) # type: ignore
|
|
2941
3108
|
|
|
2942
|
-
|
|
2943
|
-
|
|
3109
|
+
async for event in self._ahandle_model_response_stream(
|
|
3110
|
+
session=agent_session,
|
|
3111
|
+
run_response=run_response,
|
|
3112
|
+
run_messages=run_messages,
|
|
3113
|
+
response_format=response_format,
|
|
3114
|
+
stream_intermediate_steps=stream_intermediate_steps,
|
|
3115
|
+
):
|
|
3116
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3117
|
+
if isinstance(event, RunContentEvent):
|
|
3118
|
+
if stream_intermediate_steps:
|
|
3119
|
+
yield IntermediateRunContentEvent(
|
|
3120
|
+
content=event.content,
|
|
3121
|
+
content_type=event.content_type,
|
|
3122
|
+
)
|
|
3123
|
+
else:
|
|
3124
|
+
yield event
|
|
2944
3125
|
|
|
2945
|
-
|
|
3126
|
+
# If an output model is provided, generate output using the output model
|
|
3127
|
+
async for event in self._agenerate_response_with_output_model_stream(
|
|
3128
|
+
session=agent_session,
|
|
3129
|
+
run_response=run_response,
|
|
3130
|
+
run_messages=run_messages,
|
|
3131
|
+
stream_intermediate_steps=stream_intermediate_steps,
|
|
3132
|
+
):
|
|
3133
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3134
|
+
yield event
|
|
3135
|
+
|
|
3136
|
+
# Check for cancellation after model processing
|
|
3137
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3138
|
+
|
|
3139
|
+
# Break out of the run function if a tool call is paused
|
|
3140
|
+
if any(tool_call.is_paused for tool_call in run_response.tools or []):
|
|
3141
|
+
for item in self._handle_agent_run_paused_stream(
|
|
3142
|
+
run_response=run_response, run_messages=run_messages, session=agent_session, user_id=user_id
|
|
3143
|
+
):
|
|
3144
|
+
yield item
|
|
3145
|
+
return
|
|
3146
|
+
|
|
3147
|
+
run_response.status = RunStatus.completed
|
|
3148
|
+
|
|
3149
|
+
# 9. Create the run completed event
|
|
3150
|
+
completed_event = self._handle_event(create_run_completed_event(run_response), run_response)
|
|
3151
|
+
|
|
3152
|
+
# Set the run duration
|
|
3153
|
+
if run_response.metrics:
|
|
3154
|
+
run_response.metrics.stop_timer()
|
|
3155
|
+
|
|
3156
|
+
# 10. Add the run to memory
|
|
3157
|
+
agent_session.upsert_run(run=run_response)
|
|
3158
|
+
|
|
3159
|
+
# Optional: Save output to file if save_response_to_file is set
|
|
3160
|
+
self.save_run_response_to_file(
|
|
3161
|
+
run_response=run_response,
|
|
3162
|
+
input=run_messages.user_message,
|
|
3163
|
+
session_id=agent_session.session_id,
|
|
3164
|
+
user_id=user_id,
|
|
3165
|
+
)
|
|
3166
|
+
|
|
3167
|
+
# 11. Calculate session metrics
|
|
3168
|
+
self._update_session_metrics(session=agent_session, run_response=run_response)
|
|
3169
|
+
|
|
3170
|
+
# 12. Update Agent Memory
|
|
3171
|
+
async for event in self._amake_memories_and_summaries(
|
|
3172
|
+
run_response=run_response, run_messages=run_messages, session=agent_session, user_id=user_id
|
|
3173
|
+
):
|
|
3174
|
+
yield event
|
|
3175
|
+
|
|
3176
|
+
# 13. Save session to storage
|
|
3177
|
+
if self._has_async_db():
|
|
3178
|
+
await self.asave_session(session=agent_session)
|
|
3179
|
+
else:
|
|
3180
|
+
self.save_session(session=agent_session)
|
|
3181
|
+
|
|
3182
|
+
if stream_intermediate_steps:
|
|
3183
|
+
yield completed_event
|
|
3184
|
+
|
|
3185
|
+
if yield_run_response:
|
|
3186
|
+
yield run_response
|
|
3187
|
+
|
|
3188
|
+
# Log Agent Telemetry
|
|
3189
|
+
await self._alog_agent_telemetry(session_id=agent_session.session_id, run_id=run_response.run_id)
|
|
3190
|
+
|
|
3191
|
+
log_debug(f"Agent Run End: {run_response.run_id}", center=True, symbol="*")
|
|
3192
|
+
except RunCancelledException as e:
|
|
3193
|
+
# Handle run cancellation during streaming
|
|
3194
|
+
log_info(f"Run {run_response.run_id} was cancelled during streaming")
|
|
3195
|
+
run_response.status = RunStatus.cancelled
|
|
3196
|
+
run_response.content = str(e)
|
|
3197
|
+
|
|
3198
|
+
# Yield the cancellation event
|
|
3199
|
+
yield self._handle_event(
|
|
3200
|
+
create_run_cancelled_event(from_run_response=run_response, reason=str(e)),
|
|
3201
|
+
run_response,
|
|
3202
|
+
)
|
|
3203
|
+
|
|
3204
|
+
# Add the RunOutput to Agent Session even when cancelled
|
|
3205
|
+
agent_session.upsert_run(run=run_response)
|
|
3206
|
+
if self._has_async_db():
|
|
3207
|
+
await self.asave_session(session=agent_session)
|
|
3208
|
+
else:
|
|
3209
|
+
self.save_session(session=agent_session)
|
|
3210
|
+
finally:
|
|
3211
|
+
# Always clean up the run tracking
|
|
3212
|
+
cleanup_run(run_response.run_id) # type: ignore
|
|
2946
3213
|
|
|
2947
3214
|
def _execute_pre_hooks(
|
|
2948
3215
|
self,
|
|
@@ -4125,7 +4392,7 @@ class Agent:
|
|
|
4125
4392
|
|
|
4126
4393
|
tasks.append(
|
|
4127
4394
|
self.memory_manager.acreate_user_memories(
|
|
4128
|
-
message=run_messages.user_message.get_content_string(), user_id=user_id
|
|
4395
|
+
message=run_messages.user_message.get_content_string(), user_id=user_id, agent_id=self.id
|
|
4129
4396
|
)
|
|
4130
4397
|
)
|
|
4131
4398
|
|
|
@@ -4149,7 +4416,11 @@ class Agent:
|
|
|
4149
4416
|
continue
|
|
4150
4417
|
|
|
4151
4418
|
if len(parsed_messages) > 0:
|
|
4152
|
-
tasks.append(
|
|
4419
|
+
tasks.append(
|
|
4420
|
+
self.memory_manager.acreate_user_memories(
|
|
4421
|
+
messages=parsed_messages, user_id=user_id, agent_id=self.id
|
|
4422
|
+
)
|
|
4423
|
+
)
|
|
4153
4424
|
else:
|
|
4154
4425
|
log_warning("Unable to add messages to memory")
|
|
4155
4426
|
|
|
@@ -4298,6 +4569,100 @@ class Agent:
|
|
|
4298
4569
|
|
|
4299
4570
|
return agent_tools
|
|
4300
4571
|
|
|
4572
|
+
async def aget_tools(
|
|
4573
|
+
self,
|
|
4574
|
+
run_response: RunOutput,
|
|
4575
|
+
session: AgentSession,
|
|
4576
|
+
async_mode: bool = False,
|
|
4577
|
+
user_id: Optional[str] = None,
|
|
4578
|
+
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
4579
|
+
) -> Optional[List[Union[Toolkit, Callable, Function, Dict]]]:
|
|
4580
|
+
agent_tools: List[Union[Toolkit, Callable, Function, Dict]] = []
|
|
4581
|
+
|
|
4582
|
+
# Add provided tools
|
|
4583
|
+
if self.tools is not None:
|
|
4584
|
+
agent_tools.extend(self.tools)
|
|
4585
|
+
|
|
4586
|
+
# If any of the tools has "agent" as parameter, set _rebuild_tools to True
|
|
4587
|
+
for tool in agent_tools:
|
|
4588
|
+
if isinstance(tool, Function):
|
|
4589
|
+
if "agent" in tool.parameters:
|
|
4590
|
+
self._rebuild_tools = True
|
|
4591
|
+
break
|
|
4592
|
+
if "team" in tool.parameters:
|
|
4593
|
+
self._rebuild_tools = True
|
|
4594
|
+
break
|
|
4595
|
+
if isinstance(tool, Toolkit):
|
|
4596
|
+
for func in tool.functions.values():
|
|
4597
|
+
if "agent" in func.parameters:
|
|
4598
|
+
self._rebuild_tools = True
|
|
4599
|
+
break
|
|
4600
|
+
if "team" in func.parameters:
|
|
4601
|
+
self._rebuild_tools = True
|
|
4602
|
+
break
|
|
4603
|
+
if callable(tool):
|
|
4604
|
+
from inspect import signature
|
|
4605
|
+
|
|
4606
|
+
sig = signature(tool)
|
|
4607
|
+
if "agent" in sig.parameters:
|
|
4608
|
+
self._rebuild_tools = True
|
|
4609
|
+
break
|
|
4610
|
+
if "team" in sig.parameters:
|
|
4611
|
+
self._rebuild_tools = True
|
|
4612
|
+
break
|
|
4613
|
+
|
|
4614
|
+
# Add tools for accessing memory
|
|
4615
|
+
if self.read_chat_history:
|
|
4616
|
+
agent_tools.append(self._get_chat_history_function(session=session))
|
|
4617
|
+
self._rebuild_tools = True
|
|
4618
|
+
if self.read_tool_call_history:
|
|
4619
|
+
agent_tools.append(self._get_tool_call_history_function(session=session))
|
|
4620
|
+
self._rebuild_tools = True
|
|
4621
|
+
if self.search_session_history:
|
|
4622
|
+
agent_tools.append(
|
|
4623
|
+
await self._aget_previous_sessions_messages_function(num_history_sessions=self.num_history_sessions)
|
|
4624
|
+
)
|
|
4625
|
+
self._rebuild_tools = True
|
|
4626
|
+
|
|
4627
|
+
if self.enable_agentic_memory:
|
|
4628
|
+
agent_tools.append(self._get_update_user_memory_function(user_id=user_id, async_mode=async_mode))
|
|
4629
|
+
self._rebuild_tools = True
|
|
4630
|
+
|
|
4631
|
+
if self.enable_agentic_state:
|
|
4632
|
+
agent_tools.append(self.update_session_state)
|
|
4633
|
+
|
|
4634
|
+
# Add tools for accessing knowledge
|
|
4635
|
+
if self.knowledge is not None or self.knowledge_retriever is not None:
|
|
4636
|
+
# Check if knowledge retriever is an async function but used in sync mode
|
|
4637
|
+
from inspect import iscoroutinefunction
|
|
4638
|
+
|
|
4639
|
+
if not async_mode and self.knowledge_retriever and iscoroutinefunction(self.knowledge_retriever):
|
|
4640
|
+
log_warning(
|
|
4641
|
+
"Async knowledge retriever function is being used with synchronous agent.run() or agent.print_response(). "
|
|
4642
|
+
"It is recommended to use agent.arun() or agent.aprint_response() instead."
|
|
4643
|
+
)
|
|
4644
|
+
|
|
4645
|
+
if self.search_knowledge:
|
|
4646
|
+
# Use async or sync search based on async_mode
|
|
4647
|
+
if self.enable_agentic_knowledge_filters:
|
|
4648
|
+
agent_tools.append(
|
|
4649
|
+
self._search_knowledge_base_with_agentic_filters_function(
|
|
4650
|
+
run_response=run_response, async_mode=async_mode, knowledge_filters=knowledge_filters
|
|
4651
|
+
)
|
|
4652
|
+
)
|
|
4653
|
+
else:
|
|
4654
|
+
agent_tools.append(
|
|
4655
|
+
self._get_search_knowledge_base_function(
|
|
4656
|
+
run_response=run_response, async_mode=async_mode, knowledge_filters=knowledge_filters
|
|
4657
|
+
)
|
|
4658
|
+
)
|
|
4659
|
+
self._rebuild_tools = True
|
|
4660
|
+
|
|
4661
|
+
if self.update_knowledge:
|
|
4662
|
+
agent_tools.append(self.add_to_knowledge)
|
|
4663
|
+
|
|
4664
|
+
return agent_tools
|
|
4665
|
+
|
|
4301
4666
|
def _collect_joint_images(
|
|
4302
4667
|
self,
|
|
4303
4668
|
run_input: Optional[RunInput] = None,
|
|
@@ -4551,42 +4916,164 @@ class Agent:
|
|
|
4551
4916
|
func._audios = joint_audios
|
|
4552
4917
|
func._videos = joint_videos
|
|
4553
4918
|
|
|
4554
|
-
def
|
|
4555
|
-
self
|
|
4556
|
-
|
|
4557
|
-
|
|
4558
|
-
|
|
4559
|
-
|
|
4560
|
-
|
|
4919
|
+
async def _adetermine_tools_for_model(
|
|
4920
|
+
self,
|
|
4921
|
+
model: Model,
|
|
4922
|
+
run_response: RunOutput,
|
|
4923
|
+
session: AgentSession,
|
|
4924
|
+
session_state: Optional[Dict[str, Any]] = None,
|
|
4925
|
+
dependencies: Optional[Dict[str, Any]] = None,
|
|
4926
|
+
user_id: Optional[str] = None,
|
|
4927
|
+
async_mode: bool = False,
|
|
4928
|
+
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
4929
|
+
) -> None:
|
|
4930
|
+
if self._rebuild_tools:
|
|
4931
|
+
self._rebuild_tools = False
|
|
4561
4932
|
|
|
4562
|
-
|
|
4563
|
-
|
|
4564
|
-
|
|
4565
|
-
|
|
4566
|
-
|
|
4567
|
-
|
|
4933
|
+
agent_tools = await self.aget_tools(
|
|
4934
|
+
run_response=run_response,
|
|
4935
|
+
session=session,
|
|
4936
|
+
async_mode=async_mode,
|
|
4937
|
+
user_id=user_id,
|
|
4938
|
+
knowledge_filters=knowledge_filters,
|
|
4939
|
+
)
|
|
4568
4940
|
|
|
4569
|
-
|
|
4570
|
-
|
|
4571
|
-
|
|
4572
|
-
return self.output_schema
|
|
4573
|
-
else:
|
|
4574
|
-
log_debug(
|
|
4575
|
-
"Model supports native structured outputs but it is not enabled. Using JSON mode instead."
|
|
4576
|
-
)
|
|
4577
|
-
return json_response_format
|
|
4941
|
+
self._tools_for_model = []
|
|
4942
|
+
self._functions_for_model = {}
|
|
4943
|
+
self._tool_instructions = []
|
|
4578
4944
|
|
|
4579
|
-
|
|
4580
|
-
|
|
4581
|
-
|
|
4582
|
-
|
|
4583
|
-
|
|
4584
|
-
|
|
4585
|
-
|
|
4586
|
-
|
|
4587
|
-
|
|
4588
|
-
|
|
4589
|
-
|
|
4945
|
+
# Get Agent tools
|
|
4946
|
+
if agent_tools is not None and len(agent_tools) > 0:
|
|
4947
|
+
log_debug("Processing tools for model")
|
|
4948
|
+
|
|
4949
|
+
# Check if we need strict mode for the functions for the model
|
|
4950
|
+
strict = False
|
|
4951
|
+
if (
|
|
4952
|
+
self.output_schema is not None
|
|
4953
|
+
and (self.structured_outputs or (not self.use_json_mode))
|
|
4954
|
+
and model.supports_native_structured_outputs
|
|
4955
|
+
):
|
|
4956
|
+
strict = True
|
|
4957
|
+
|
|
4958
|
+
for tool in agent_tools:
|
|
4959
|
+
if isinstance(tool, Dict):
|
|
4960
|
+
# If a dict is passed, it is a builtin tool
|
|
4961
|
+
# that is run by the model provider and not the Agent
|
|
4962
|
+
self._tools_for_model.append(tool)
|
|
4963
|
+
log_debug(f"Included builtin tool {tool}")
|
|
4964
|
+
|
|
4965
|
+
elif isinstance(tool, Toolkit):
|
|
4966
|
+
# For each function in the toolkit and process entrypoint
|
|
4967
|
+
for name, func in tool.functions.items():
|
|
4968
|
+
# If the function does not exist in self.functions
|
|
4969
|
+
if name not in self._functions_for_model:
|
|
4970
|
+
func._agent = self
|
|
4971
|
+
func.process_entrypoint(strict=strict)
|
|
4972
|
+
if strict and func.strict is None:
|
|
4973
|
+
func.strict = True
|
|
4974
|
+
if self.tool_hooks is not None:
|
|
4975
|
+
func.tool_hooks = self.tool_hooks
|
|
4976
|
+
self._functions_for_model[name] = func
|
|
4977
|
+
self._tools_for_model.append({"type": "function", "function": func.to_dict()})
|
|
4978
|
+
log_debug(f"Added tool {name} from {tool.name}")
|
|
4979
|
+
|
|
4980
|
+
# Add instructions from the toolkit
|
|
4981
|
+
if tool.add_instructions and tool.instructions is not None:
|
|
4982
|
+
self._tool_instructions.append(tool.instructions)
|
|
4983
|
+
|
|
4984
|
+
elif isinstance(tool, Function):
|
|
4985
|
+
if tool.name not in self._functions_for_model:
|
|
4986
|
+
tool._agent = self
|
|
4987
|
+
tool.process_entrypoint(strict=strict)
|
|
4988
|
+
if strict and tool.strict is None:
|
|
4989
|
+
tool.strict = True
|
|
4990
|
+
if self.tool_hooks is not None:
|
|
4991
|
+
tool.tool_hooks = self.tool_hooks
|
|
4992
|
+
self._functions_for_model[tool.name] = tool
|
|
4993
|
+
self._tools_for_model.append({"type": "function", "function": tool.to_dict()})
|
|
4994
|
+
log_debug(f"Added tool {tool.name}")
|
|
4995
|
+
|
|
4996
|
+
# Add instructions from the Function
|
|
4997
|
+
if tool.add_instructions and tool.instructions is not None:
|
|
4998
|
+
self._tool_instructions.append(tool.instructions)
|
|
4999
|
+
|
|
5000
|
+
elif callable(tool):
|
|
5001
|
+
try:
|
|
5002
|
+
function_name = tool.__name__
|
|
5003
|
+
if function_name not in self._functions_for_model:
|
|
5004
|
+
func = Function.from_callable(tool, strict=strict)
|
|
5005
|
+
func._agent = self
|
|
5006
|
+
if strict:
|
|
5007
|
+
func.strict = True
|
|
5008
|
+
if self.tool_hooks is not None:
|
|
5009
|
+
func.tool_hooks = self.tool_hooks
|
|
5010
|
+
self._functions_for_model[func.name] = func
|
|
5011
|
+
self._tools_for_model.append({"type": "function", "function": func.to_dict()})
|
|
5012
|
+
log_debug(f"Added tool {func.name}")
|
|
5013
|
+
except Exception as e:
|
|
5014
|
+
log_warning(f"Could not add tool {tool}: {e}")
|
|
5015
|
+
|
|
5016
|
+
# Update the session state for the functions
|
|
5017
|
+
if self._functions_for_model:
|
|
5018
|
+
from inspect import signature
|
|
5019
|
+
|
|
5020
|
+
# Check if any functions need media before collecting
|
|
5021
|
+
needs_media = any(
|
|
5022
|
+
any(param in signature(func.entrypoint).parameters for param in ["images", "videos", "audios", "files"])
|
|
5023
|
+
for func in self._functions_for_model.values()
|
|
5024
|
+
if func.entrypoint is not None
|
|
5025
|
+
)
|
|
5026
|
+
|
|
5027
|
+
# Only collect media if functions actually need them
|
|
5028
|
+
joint_images = self._collect_joint_images(run_response.input, session) if needs_media else None
|
|
5029
|
+
joint_files = self._collect_joint_files(run_response.input) if needs_media else None
|
|
5030
|
+
joint_audios = self._collect_joint_audios(run_response.input, session) if needs_media else None
|
|
5031
|
+
joint_videos = self._collect_joint_videos(run_response.input, session) if needs_media else None
|
|
5032
|
+
|
|
5033
|
+
for func in self._functions_for_model.values():
|
|
5034
|
+
func._session_state = session_state
|
|
5035
|
+
func._dependencies = dependencies
|
|
5036
|
+
func._images = joint_images
|
|
5037
|
+
func._files = joint_files
|
|
5038
|
+
func._audios = joint_audios
|
|
5039
|
+
func._videos = joint_videos
|
|
5040
|
+
|
|
5041
|
+
def _model_should_return_structured_output(self):
|
|
5042
|
+
self.model = cast(Model, self.model)
|
|
5043
|
+
return bool(
|
|
5044
|
+
self.model.supports_native_structured_outputs
|
|
5045
|
+
and self.output_schema is not None
|
|
5046
|
+
and (not self.use_json_mode or self.structured_outputs)
|
|
5047
|
+
)
|
|
5048
|
+
|
|
5049
|
+
def _get_response_format(self, model: Optional[Model] = None) -> Optional[Union[Dict, Type[BaseModel]]]:
|
|
5050
|
+
model = cast(Model, model or self.model)
|
|
5051
|
+
if self.output_schema is None:
|
|
5052
|
+
return None
|
|
5053
|
+
else:
|
|
5054
|
+
json_response_format = {"type": "json_object"}
|
|
5055
|
+
|
|
5056
|
+
if model.supports_native_structured_outputs:
|
|
5057
|
+
if not self.use_json_mode or self.structured_outputs:
|
|
5058
|
+
log_debug("Setting Model.response_format to Agent.output_schema")
|
|
5059
|
+
return self.output_schema
|
|
5060
|
+
else:
|
|
5061
|
+
log_debug(
|
|
5062
|
+
"Model supports native structured outputs but it is not enabled. Using JSON mode instead."
|
|
5063
|
+
)
|
|
5064
|
+
return json_response_format
|
|
5065
|
+
|
|
5066
|
+
elif model.supports_json_schema_outputs:
|
|
5067
|
+
if self.use_json_mode or (not self.structured_outputs):
|
|
5068
|
+
log_debug("Setting Model.response_format to JSON response mode")
|
|
5069
|
+
return {
|
|
5070
|
+
"type": "json_schema",
|
|
5071
|
+
"json_schema": {
|
|
5072
|
+
"name": self.output_schema.__name__,
|
|
5073
|
+
"schema": self.output_schema.model_json_schema(),
|
|
5074
|
+
},
|
|
5075
|
+
}
|
|
5076
|
+
else:
|
|
4590
5077
|
return None
|
|
4591
5078
|
|
|
4592
5079
|
else:
|
|
@@ -4660,6 +5147,16 @@ class Agent:
|
|
|
4660
5147
|
log_warning(f"Error getting session from db: {e}")
|
|
4661
5148
|
return None
|
|
4662
5149
|
|
|
5150
|
+
async def _aread_session(self, session_id: str) -> Optional[AgentSession]:
|
|
5151
|
+
"""Get a Session from the database."""
|
|
5152
|
+
try:
|
|
5153
|
+
if not self.db:
|
|
5154
|
+
raise ValueError("Db not initialized")
|
|
5155
|
+
return await self.db.get_session(session_id=session_id, session_type=SessionType.AGENT) # type: ignore
|
|
5156
|
+
except Exception as e:
|
|
5157
|
+
log_warning(f"Error getting session from db: {e}")
|
|
5158
|
+
return None
|
|
5159
|
+
|
|
4663
5160
|
def _upsert_session(self, session: AgentSession) -> Optional[AgentSession]:
|
|
4664
5161
|
"""Upsert a Session into the database."""
|
|
4665
5162
|
|
|
@@ -4671,6 +5168,16 @@ class Agent:
|
|
|
4671
5168
|
log_warning(f"Error upserting session into db: {e}")
|
|
4672
5169
|
return None
|
|
4673
5170
|
|
|
5171
|
+
async def _aupsert_session(self, session: AgentSession) -> Optional[AgentSession]:
|
|
5172
|
+
"""Upsert a Session into the database."""
|
|
5173
|
+
try:
|
|
5174
|
+
if not self.db:
|
|
5175
|
+
raise ValueError("Db not initialized")
|
|
5176
|
+
return await self.db.upsert_session(session=session) # type: ignore
|
|
5177
|
+
except Exception as e:
|
|
5178
|
+
log_warning(f"Error upserting session into db: {e}")
|
|
5179
|
+
return None
|
|
5180
|
+
|
|
4674
5181
|
def _load_session_state(self, session: AgentSession, session_state: Dict[str, Any]):
|
|
4675
5182
|
"""Load and return the stored session_state from the database, optionally merging it with the given one"""
|
|
4676
5183
|
|
|
@@ -4756,6 +5263,42 @@ class Agent:
|
|
|
4756
5263
|
|
|
4757
5264
|
return agent_session
|
|
4758
5265
|
|
|
5266
|
+
async def _aread_or_create_session(
|
|
5267
|
+
self,
|
|
5268
|
+
session_id: str,
|
|
5269
|
+
user_id: Optional[str] = None,
|
|
5270
|
+
) -> AgentSession:
|
|
5271
|
+
from time import time
|
|
5272
|
+
|
|
5273
|
+
# Returning cached session if we have one
|
|
5274
|
+
if self._agent_session is not None and self._agent_session.session_id == session_id:
|
|
5275
|
+
return self._agent_session
|
|
5276
|
+
|
|
5277
|
+
# Try to load from database
|
|
5278
|
+
agent_session = None
|
|
5279
|
+
if self.db is not None and self.team_id is None and self.workflow_id is None:
|
|
5280
|
+
log_debug(f"Reading AgentSession: {session_id}")
|
|
5281
|
+
|
|
5282
|
+
agent_session = cast(AgentSession, await self._aread_session(session_id=session_id))
|
|
5283
|
+
|
|
5284
|
+
if agent_session is None:
|
|
5285
|
+
# Creating new session if none found
|
|
5286
|
+
log_debug(f"Creating new AgentSession: {session_id}")
|
|
5287
|
+
agent_session = AgentSession(
|
|
5288
|
+
session_id=session_id,
|
|
5289
|
+
agent_id=self.id,
|
|
5290
|
+
user_id=user_id,
|
|
5291
|
+
agent_data=self._get_agent_data(),
|
|
5292
|
+
session_data={},
|
|
5293
|
+
metadata=self.metadata,
|
|
5294
|
+
created_at=int(time()),
|
|
5295
|
+
)
|
|
5296
|
+
|
|
5297
|
+
if self.cache_session:
|
|
5298
|
+
self._agent_session = agent_session
|
|
5299
|
+
|
|
5300
|
+
return agent_session
|
|
5301
|
+
|
|
4759
5302
|
def get_run_output(self, run_id: str, session_id: Optional[str] = None) -> Optional[RunOutput]:
|
|
4760
5303
|
"""
|
|
4761
5304
|
Get a RunOutput from the database.
|
|
@@ -4876,6 +5419,27 @@ class Agent:
|
|
|
4876
5419
|
self._upsert_session(session=session)
|
|
4877
5420
|
log_debug(f"Created or updated AgentSession record: {session.session_id}")
|
|
4878
5421
|
|
|
5422
|
+
async def asave_session(self, session: AgentSession) -> None:
|
|
5423
|
+
"""Save the AgentSession to storage
|
|
5424
|
+
|
|
5425
|
+
Returns:
|
|
5426
|
+
Optional[AgentSession]: The saved AgentSession or None if not saved.
|
|
5427
|
+
"""
|
|
5428
|
+
# If the agent is a member of a team, do not save the session to the database
|
|
5429
|
+
if (
|
|
5430
|
+
self.db is not None
|
|
5431
|
+
and self.team_id is None
|
|
5432
|
+
and self.workflow_id is None
|
|
5433
|
+
and session.session_data is not None
|
|
5434
|
+
):
|
|
5435
|
+
if session.session_data is not None and "session_state" in session.session_data:
|
|
5436
|
+
session.session_data["session_state"].pop("current_session_id", None)
|
|
5437
|
+
session.session_data["session_state"].pop("current_user_id", None)
|
|
5438
|
+
session.session_data["session_state"].pop("current_run_id", None)
|
|
5439
|
+
|
|
5440
|
+
await self._aupsert_session(session=session)
|
|
5441
|
+
log_debug(f"Created or updated AgentSession record: {session.session_id}")
|
|
5442
|
+
|
|
4879
5443
|
def get_chat_history(self, session_id: Optional[str] = None) -> List[Message]:
|
|
4880
5444
|
"""Read the chat history from the session"""
|
|
4881
5445
|
if not session_id and not self.session_id:
|
|
@@ -5034,8 +5598,7 @@ class Agent:
|
|
|
5034
5598
|
session = self.get_session(session_id=session_id) # type: ignore
|
|
5035
5599
|
|
|
5036
5600
|
if session is None:
|
|
5037
|
-
|
|
5038
|
-
return []
|
|
5601
|
+
raise Exception("Session not found")
|
|
5039
5602
|
|
|
5040
5603
|
# Only filter by agent_id if this is part of a team
|
|
5041
5604
|
return session.get_messages_from_last_n_runs(
|
|
@@ -5065,6 +5628,16 @@ class Agent:
|
|
|
5065
5628
|
|
|
5066
5629
|
return self.memory_manager.get_user_memories(user_id=user_id)
|
|
5067
5630
|
|
|
5631
|
+
async def aget_user_memories(self, user_id: Optional[str] = None) -> Optional[List[UserMemory]]:
|
|
5632
|
+
"""Get the user memories for the given user ID."""
|
|
5633
|
+
if self.memory_manager is None:
|
|
5634
|
+
return None
|
|
5635
|
+
user_id = user_id if user_id is not None else self.user_id
|
|
5636
|
+
if user_id is None:
|
|
5637
|
+
user_id = "default"
|
|
5638
|
+
|
|
5639
|
+
return await self.memory_manager.aget_user_memories(user_id=user_id)
|
|
5640
|
+
|
|
5068
5641
|
def _format_message_with_state_variables(
|
|
5069
5642
|
self,
|
|
5070
5643
|
message: Any,
|
|
@@ -5081,37 +5654,308 @@ class Agent:
|
|
|
5081
5654
|
if not isinstance(message, str):
|
|
5082
5655
|
return message
|
|
5083
5656
|
|
|
5084
|
-
# Should already be resolved and passed from run() method
|
|
5085
|
-
format_variables = ChainMap(
|
|
5086
|
-
session_state or {},
|
|
5087
|
-
dependencies or {},
|
|
5088
|
-
metadata or {},
|
|
5089
|
-
{"user_id": user_id} if user_id is not None else {},
|
|
5657
|
+
# Should already be resolved and passed from run() method
|
|
5658
|
+
format_variables = ChainMap(
|
|
5659
|
+
session_state or {},
|
|
5660
|
+
dependencies or {},
|
|
5661
|
+
metadata or {},
|
|
5662
|
+
{"user_id": user_id} if user_id is not None else {},
|
|
5663
|
+
)
|
|
5664
|
+
converted_msg = deepcopy(message)
|
|
5665
|
+
for var_name in format_variables.keys():
|
|
5666
|
+
# Only convert standalone {var_name} patterns, not nested ones
|
|
5667
|
+
pattern = r"\{" + re.escape(var_name) + r"\}"
|
|
5668
|
+
replacement = "${" + var_name + "}"
|
|
5669
|
+
converted_msg = re.sub(pattern, replacement, converted_msg)
|
|
5670
|
+
|
|
5671
|
+
# Use Template to safely substitute variables
|
|
5672
|
+
template = string.Template(converted_msg)
|
|
5673
|
+
try:
|
|
5674
|
+
result = template.safe_substitute(format_variables)
|
|
5675
|
+
return result
|
|
5676
|
+
except Exception as e:
|
|
5677
|
+
log_warning(f"Template substitution failed: {e}")
|
|
5678
|
+
return message
|
|
5679
|
+
|
|
5680
|
+
def get_system_message(
|
|
5681
|
+
self,
|
|
5682
|
+
session: AgentSession,
|
|
5683
|
+
session_state: Optional[Dict[str, Any]] = None,
|
|
5684
|
+
user_id: Optional[str] = None,
|
|
5685
|
+
dependencies: Optional[Dict[str, Any]] = None,
|
|
5686
|
+
metadata: Optional[Dict[str, Any]] = None,
|
|
5687
|
+
add_session_state_to_context: Optional[bool] = None,
|
|
5688
|
+
) -> Optional[Message]:
|
|
5689
|
+
"""Return the system message for the Agent.
|
|
5690
|
+
|
|
5691
|
+
1. If the system_message is provided, use that.
|
|
5692
|
+
2. If build_context is False, return None.
|
|
5693
|
+
3. Build and return the default system message for the Agent.
|
|
5694
|
+
"""
|
|
5695
|
+
|
|
5696
|
+
# 1. If the system_message is provided, use that.
|
|
5697
|
+
if self.system_message is not None:
|
|
5698
|
+
if isinstance(self.system_message, Message):
|
|
5699
|
+
return self.system_message
|
|
5700
|
+
|
|
5701
|
+
sys_message_content: str = ""
|
|
5702
|
+
if isinstance(self.system_message, str):
|
|
5703
|
+
sys_message_content = self.system_message
|
|
5704
|
+
elif callable(self.system_message):
|
|
5705
|
+
sys_message_content = self.system_message(agent=self)
|
|
5706
|
+
if not isinstance(sys_message_content, str):
|
|
5707
|
+
raise Exception("system_message must return a string")
|
|
5708
|
+
|
|
5709
|
+
# Format the system message with the session state variables
|
|
5710
|
+
if self.resolve_in_context:
|
|
5711
|
+
sys_message_content = self._format_message_with_state_variables(
|
|
5712
|
+
sys_message_content,
|
|
5713
|
+
user_id=user_id,
|
|
5714
|
+
dependencies=dependencies,
|
|
5715
|
+
metadata=metadata,
|
|
5716
|
+
session_state=session_state,
|
|
5717
|
+
)
|
|
5718
|
+
|
|
5719
|
+
# type: ignore
|
|
5720
|
+
return Message(role=self.system_message_role, content=sys_message_content)
|
|
5721
|
+
|
|
5722
|
+
# 2. If build_context is False, return None.
|
|
5723
|
+
if not self.build_context:
|
|
5724
|
+
return None
|
|
5725
|
+
|
|
5726
|
+
if self.model is None:
|
|
5727
|
+
raise Exception("model not set")
|
|
5728
|
+
|
|
5729
|
+
# 3. Build and return the default system message for the Agent.
|
|
5730
|
+
# 3.1 Build the list of instructions for the system message
|
|
5731
|
+
instructions: List[str] = []
|
|
5732
|
+
if self.instructions is not None:
|
|
5733
|
+
_instructions = self.instructions
|
|
5734
|
+
if callable(self.instructions):
|
|
5735
|
+
import inspect
|
|
5736
|
+
|
|
5737
|
+
signature = inspect.signature(self.instructions)
|
|
5738
|
+
instruction_args: Dict[str, Any] = {}
|
|
5739
|
+
|
|
5740
|
+
# Check for agent parameter
|
|
5741
|
+
if "agent" in signature.parameters:
|
|
5742
|
+
instruction_args["agent"] = self
|
|
5743
|
+
|
|
5744
|
+
# Check for session_state parameter
|
|
5745
|
+
if "session_state" in signature.parameters:
|
|
5746
|
+
instruction_args["session_state"] = session_state or {}
|
|
5747
|
+
|
|
5748
|
+
_instructions = self.instructions(**instruction_args)
|
|
5749
|
+
|
|
5750
|
+
if isinstance(_instructions, str):
|
|
5751
|
+
instructions.append(_instructions)
|
|
5752
|
+
elif isinstance(_instructions, list):
|
|
5753
|
+
instructions.extend(_instructions)
|
|
5754
|
+
|
|
5755
|
+
# 3.1.1 Add instructions from the Model
|
|
5756
|
+
_model_instructions = self.model.get_instructions_for_model(self._tools_for_model)
|
|
5757
|
+
if _model_instructions is not None:
|
|
5758
|
+
instructions.extend(_model_instructions)
|
|
5759
|
+
|
|
5760
|
+
# 3.2 Build a list of additional information for the system message
|
|
5761
|
+
additional_information: List[str] = []
|
|
5762
|
+
# 3.2.1 Add instructions for using markdown
|
|
5763
|
+
if self.markdown and self.output_schema is None:
|
|
5764
|
+
additional_information.append("Use markdown to format your answers.")
|
|
5765
|
+
# 3.2.2 Add the current datetime
|
|
5766
|
+
if self.add_datetime_to_context:
|
|
5767
|
+
from datetime import datetime
|
|
5768
|
+
|
|
5769
|
+
tz = None
|
|
5770
|
+
|
|
5771
|
+
if self.timezone_identifier:
|
|
5772
|
+
try:
|
|
5773
|
+
from zoneinfo import ZoneInfo
|
|
5774
|
+
|
|
5775
|
+
tz = ZoneInfo(self.timezone_identifier)
|
|
5776
|
+
except Exception:
|
|
5777
|
+
log_warning("Invalid timezone identifier")
|
|
5778
|
+
|
|
5779
|
+
time = datetime.now(tz) if tz else datetime.now()
|
|
5780
|
+
|
|
5781
|
+
additional_information.append(f"The current time is {time}.")
|
|
5782
|
+
|
|
5783
|
+
# 3.2.3 Add the current location
|
|
5784
|
+
if self.add_location_to_context:
|
|
5785
|
+
from agno.utils.location import get_location
|
|
5786
|
+
|
|
5787
|
+
location = get_location()
|
|
5788
|
+
if location:
|
|
5789
|
+
location_str = ", ".join(
|
|
5790
|
+
filter(None, [location.get("city"), location.get("region"), location.get("country")])
|
|
5791
|
+
)
|
|
5792
|
+
if location_str:
|
|
5793
|
+
additional_information.append(f"Your approximate location is: {location_str}.")
|
|
5794
|
+
|
|
5795
|
+
# 3.2.4 Add agent name if provided
|
|
5796
|
+
if self.name is not None and self.add_name_to_context:
|
|
5797
|
+
additional_information.append(f"Your name is: {self.name}.")
|
|
5798
|
+
|
|
5799
|
+
# 3.2.5 Add information about agentic filters if enabled
|
|
5800
|
+
if self.knowledge is not None and self.enable_agentic_knowledge_filters:
|
|
5801
|
+
valid_filters = self.knowledge.get_valid_filters()
|
|
5802
|
+
if valid_filters:
|
|
5803
|
+
valid_filters_str = ", ".join(valid_filters)
|
|
5804
|
+
additional_information.append(
|
|
5805
|
+
dedent(f"""
|
|
5806
|
+
The knowledge base contains documents with these metadata filters: {valid_filters_str}.
|
|
5807
|
+
Always use filters when the user query indicates specific metadata.
|
|
5808
|
+
|
|
5809
|
+
Examples:
|
|
5810
|
+
1. If the user asks about a specific person like "Jordan Mitchell", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like user_id>': '<valid value based on the user query>'}}.
|
|
5811
|
+
2. If the user asks about a specific document type like "contracts", you MUST use the search_knowledge_base tool with the filters parameter set to {{'document_type': 'contract'}}.
|
|
5812
|
+
4. If the user asks about a specific location like "documents from New York", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like location>': 'New York'}}.
|
|
5813
|
+
|
|
5814
|
+
General Guidelines:
|
|
5815
|
+
- Always analyze the user query to identify relevant metadata.
|
|
5816
|
+
- Use the most specific filter(s) possible to narrow down results.
|
|
5817
|
+
- If multiple filters are relevant, combine them in the filters parameter (e.g., {{'name': 'Jordan Mitchell', 'document_type': 'contract'}}).
|
|
5818
|
+
- Ensure the filter keys match the valid metadata filters: {valid_filters_str}.
|
|
5819
|
+
|
|
5820
|
+
You can use the search_knowledge_base tool to search the knowledge base and get the most relevant documents. Make sure to pass the filters as [Dict[str: Any]] to the tool. FOLLOW THIS STRUCTURE STRICTLY.
|
|
5821
|
+
""")
|
|
5822
|
+
)
|
|
5823
|
+
|
|
5824
|
+
# 3.3 Build the default system message for the Agent.
|
|
5825
|
+
system_message_content: str = ""
|
|
5826
|
+
# 3.3.1 First add the Agent description if provided
|
|
5827
|
+
if self.description is not None:
|
|
5828
|
+
system_message_content += f"{self.description}\n"
|
|
5829
|
+
# 3.3.2 Then add the Agent role if provided
|
|
5830
|
+
if self.role is not None:
|
|
5831
|
+
system_message_content += f"\n<your_role>\n{self.role}\n</your_role>\n\n"
|
|
5832
|
+
# 3.3.4 Then add instructions for the Agent
|
|
5833
|
+
if len(instructions) > 0:
|
|
5834
|
+
system_message_content += "<instructions>"
|
|
5835
|
+
if len(instructions) > 1:
|
|
5836
|
+
for _upi in instructions:
|
|
5837
|
+
system_message_content += f"\n- {_upi}"
|
|
5838
|
+
else:
|
|
5839
|
+
system_message_content += "\n" + instructions[0]
|
|
5840
|
+
system_message_content += "\n</instructions>\n\n"
|
|
5841
|
+
# 3.3.6 Add additional information
|
|
5842
|
+
if len(additional_information) > 0:
|
|
5843
|
+
system_message_content += "<additional_information>"
|
|
5844
|
+
for _ai in additional_information:
|
|
5845
|
+
system_message_content += f"\n- {_ai}"
|
|
5846
|
+
system_message_content += "\n</additional_information>\n\n"
|
|
5847
|
+
# 3.3.7 Then add instructions for the tools
|
|
5848
|
+
if self._tool_instructions is not None:
|
|
5849
|
+
for _ti in self._tool_instructions:
|
|
5850
|
+
system_message_content += f"{_ti}\n"
|
|
5851
|
+
|
|
5852
|
+
# Format the system message with the session state variables
|
|
5853
|
+
if self.resolve_in_context:
|
|
5854
|
+
system_message_content = self._format_message_with_state_variables(
|
|
5855
|
+
system_message_content,
|
|
5856
|
+
user_id=user_id,
|
|
5857
|
+
session_state=session_state,
|
|
5858
|
+
dependencies=dependencies,
|
|
5859
|
+
metadata=metadata,
|
|
5860
|
+
)
|
|
5861
|
+
|
|
5862
|
+
# 3.3.7 Then add the expected output
|
|
5863
|
+
if self.expected_output is not None:
|
|
5864
|
+
system_message_content += f"<expected_output>\n{self.expected_output.strip()}\n</expected_output>\n\n"
|
|
5865
|
+
# 3.3.8 Then add additional context
|
|
5866
|
+
if self.additional_context is not None:
|
|
5867
|
+
system_message_content += f"{self.additional_context}\n"
|
|
5868
|
+
# 3.3.9 Then add memories to the system prompt
|
|
5869
|
+
if self.add_memories_to_context:
|
|
5870
|
+
_memory_manager_not_set = False
|
|
5871
|
+
if not user_id:
|
|
5872
|
+
user_id = "default"
|
|
5873
|
+
if self.memory_manager is None:
|
|
5874
|
+
self._set_memory_manager()
|
|
5875
|
+
_memory_manager_not_set = True
|
|
5876
|
+
|
|
5877
|
+
user_memories = self.memory_manager.get_user_memories(user_id=user_id) # type: ignore
|
|
5878
|
+
|
|
5879
|
+
if user_memories and len(user_memories) > 0:
|
|
5880
|
+
system_message_content += "You have access to user info and preferences from previous interactions that you can use to personalize your response:\n\n"
|
|
5881
|
+
system_message_content += "<memories_from_previous_interactions>"
|
|
5882
|
+
for _memory in user_memories: # type: ignore
|
|
5883
|
+
system_message_content += f"\n- {_memory.memory}"
|
|
5884
|
+
system_message_content += "\n</memories_from_previous_interactions>\n\n"
|
|
5885
|
+
system_message_content += (
|
|
5886
|
+
"Note: this information is from previous interactions and may be updated in this conversation. "
|
|
5887
|
+
"You should always prefer information from this conversation over the past memories.\n"
|
|
5888
|
+
)
|
|
5889
|
+
else:
|
|
5890
|
+
system_message_content += (
|
|
5891
|
+
"You have the capability to retain memories from previous interactions with the user, "
|
|
5892
|
+
"but have not had any interactions with the user yet.\n"
|
|
5893
|
+
)
|
|
5894
|
+
if _memory_manager_not_set:
|
|
5895
|
+
self.memory_manager = None
|
|
5896
|
+
|
|
5897
|
+
if self.enable_agentic_memory:
|
|
5898
|
+
system_message_content += (
|
|
5899
|
+
"\n<updating_user_memories>\n"
|
|
5900
|
+
"- You have access to the `update_user_memory` tool that you can use to add new memories, update existing memories, delete memories, or clear all memories.\n"
|
|
5901
|
+
"- If the user's message includes information that should be captured as a memory, use the `update_user_memory` tool to update your memory database.\n"
|
|
5902
|
+
"- Memories should include details that could personalize ongoing interactions with the user.\n"
|
|
5903
|
+
"- Use this tool to add new memories or update existing memories that you identify in the conversation.\n"
|
|
5904
|
+
"- Use this tool if the user asks to update their memory, delete a memory, or clear all memories.\n"
|
|
5905
|
+
"- If you use the `update_user_memory` tool, remember to pass on the response to the user.\n"
|
|
5906
|
+
"</updating_user_memories>\n\n"
|
|
5907
|
+
)
|
|
5908
|
+
|
|
5909
|
+
# 3.3.11 Then add a summary of the interaction to the system prompt
|
|
5910
|
+
if self.add_session_summary_to_context and session.summary is not None:
|
|
5911
|
+
system_message_content += "Here is a brief summary of your previous interactions:\n\n"
|
|
5912
|
+
system_message_content += "<summary_of_previous_interactions>\n"
|
|
5913
|
+
system_message_content += session.summary.summary
|
|
5914
|
+
system_message_content += "\n</summary_of_previous_interactions>\n\n"
|
|
5915
|
+
system_message_content += (
|
|
5916
|
+
"Note: this information is from previous interactions and may be outdated. "
|
|
5917
|
+
"You should ALWAYS prefer information from this conversation over the past summary.\n\n"
|
|
5918
|
+
)
|
|
5919
|
+
|
|
5920
|
+
# 3.3.12 Add the system message from the Model
|
|
5921
|
+
system_message_from_model = self.model.get_system_message_for_model(self._tools_for_model)
|
|
5922
|
+
if system_message_from_model is not None:
|
|
5923
|
+
system_message_content += system_message_from_model
|
|
5924
|
+
|
|
5925
|
+
# 3.3.13 Add the JSON output prompt if output_schema is provided and the model does not support native structured outputs or JSON schema outputs
|
|
5926
|
+
# or if use_json_mode is True
|
|
5927
|
+
if (
|
|
5928
|
+
self.output_schema is not None
|
|
5929
|
+
and self.parser_model is None
|
|
5930
|
+
and not (
|
|
5931
|
+
(self.model.supports_native_structured_outputs or self.model.supports_json_schema_outputs)
|
|
5932
|
+
and (not self.use_json_mode or self.structured_outputs is True)
|
|
5933
|
+
)
|
|
5934
|
+
):
|
|
5935
|
+
system_message_content += f"{get_json_output_prompt(self.output_schema)}" # type: ignore
|
|
5936
|
+
|
|
5937
|
+
# 3.3.14 Add the response model format prompt if output_schema is provided
|
|
5938
|
+
if self.output_schema is not None and self.parser_model is not None:
|
|
5939
|
+
system_message_content += f"{get_response_model_format_prompt(self.output_schema)}"
|
|
5940
|
+
|
|
5941
|
+
# 3.3.15 Add the session state to the system message
|
|
5942
|
+
if self.add_session_state_to_context and session_state is not None:
|
|
5943
|
+
system_message_content += f"\n<session_state>\n{session_state}\n</session_state>\n\n"
|
|
5944
|
+
|
|
5945
|
+
# Return the system message
|
|
5946
|
+
return (
|
|
5947
|
+
Message(role=self.system_message_role, content=system_message_content.strip()) # type: ignore
|
|
5948
|
+
if system_message_content
|
|
5949
|
+
else None
|
|
5090
5950
|
)
|
|
5091
|
-
converted_msg = deepcopy(message)
|
|
5092
|
-
for var_name in format_variables.keys():
|
|
5093
|
-
# Only convert standalone {var_name} patterns, not nested ones
|
|
5094
|
-
pattern = r"\{" + re.escape(var_name) + r"\}"
|
|
5095
|
-
replacement = "${" + var_name + "}"
|
|
5096
|
-
converted_msg = re.sub(pattern, replacement, converted_msg)
|
|
5097
|
-
|
|
5098
|
-
# Use Template to safely substitute variables
|
|
5099
|
-
template = string.Template(converted_msg)
|
|
5100
|
-
try:
|
|
5101
|
-
result = template.safe_substitute(format_variables)
|
|
5102
|
-
return result
|
|
5103
|
-
except Exception as e:
|
|
5104
|
-
log_warning(f"Template substitution failed: {e}")
|
|
5105
|
-
return message
|
|
5106
5951
|
|
|
5107
|
-
def
|
|
5952
|
+
async def aget_system_message(
|
|
5108
5953
|
self,
|
|
5109
5954
|
session: AgentSession,
|
|
5110
5955
|
session_state: Optional[Dict[str, Any]] = None,
|
|
5111
5956
|
user_id: Optional[str] = None,
|
|
5112
5957
|
dependencies: Optional[Dict[str, Any]] = None,
|
|
5113
5958
|
metadata: Optional[Dict[str, Any]] = None,
|
|
5114
|
-
add_session_state_to_context: Optional[bool] = None,
|
|
5115
5959
|
) -> Optional[Message]:
|
|
5116
5960
|
"""Return the system message for the Agent.
|
|
5117
5961
|
|
|
@@ -5225,7 +6069,7 @@ class Agent:
|
|
|
5225
6069
|
|
|
5226
6070
|
# 3.2.5 Add information about agentic filters if enabled
|
|
5227
6071
|
if self.knowledge is not None and self.enable_agentic_knowledge_filters:
|
|
5228
|
-
valid_filters = self.knowledge
|
|
6072
|
+
valid_filters = getattr(self.knowledge, "valid_metadata_filters", None)
|
|
5229
6073
|
if valid_filters:
|
|
5230
6074
|
valid_filters_str = ", ".join(valid_filters)
|
|
5231
6075
|
additional_information.append(
|
|
@@ -5300,11 +6144,14 @@ class Agent:
|
|
|
5300
6144
|
if self.memory_manager is None:
|
|
5301
6145
|
self._set_memory_manager()
|
|
5302
6146
|
_memory_manager_not_set = True
|
|
5303
|
-
|
|
6147
|
+
|
|
6148
|
+
if self._has_async_db():
|
|
6149
|
+
user_memories = await self.memory_manager.aget_user_memories(user_id=user_id) # type: ignore
|
|
6150
|
+
else:
|
|
6151
|
+
user_memories = self.memory_manager.get_user_memories(user_id=user_id) # type: ignore
|
|
6152
|
+
|
|
5304
6153
|
if user_memories and len(user_memories) > 0:
|
|
5305
|
-
system_message_content +=
|
|
5306
|
-
"You have access to memories from previous interactions with the user that you can use:\n\n"
|
|
5307
|
-
)
|
|
6154
|
+
system_message_content += "You have access to user info and preferences from previous interactions that you can use to personalize your response:\n\n"
|
|
5308
6155
|
system_message_content += "<memories_from_previous_interactions>"
|
|
5309
6156
|
for _memory in user_memories: # type: ignore
|
|
5310
6157
|
system_message_content += f"\n- {_memory.memory}"
|
|
@@ -5366,7 +6213,7 @@ class Agent:
|
|
|
5366
6213
|
system_message_content += f"{get_response_model_format_prompt(self.output_schema)}"
|
|
5367
6214
|
|
|
5368
6215
|
# 3.3.15 Add the session state to the system message
|
|
5369
|
-
if add_session_state_to_context and session_state is not None:
|
|
6216
|
+
if self.add_session_state_to_context and session_state is not None:
|
|
5370
6217
|
system_message_content += self._get_formatted_session_state_for_system_message(session_state)
|
|
5371
6218
|
|
|
5372
6219
|
# Return the system message
|
|
@@ -5463,90 +6310,291 @@ class Agent:
|
|
|
5463
6310
|
log_warning(f"Failed to validate message: {e}")
|
|
5464
6311
|
raise Exception(f"Failed to validate message: {e}")
|
|
5465
6312
|
|
|
5466
|
-
# If message is provided as a BaseModel, convert it to a Message
|
|
5467
|
-
elif isinstance(input, BaseModel):
|
|
5468
|
-
try:
|
|
5469
|
-
# Create a user message with the BaseModel content
|
|
5470
|
-
content = input.model_dump_json(indent=2, exclude_none=True)
|
|
5471
|
-
return Message(role=self.user_message_role, content=content)
|
|
5472
|
-
except Exception as e:
|
|
5473
|
-
log_warning(f"Failed to convert BaseModel to message: {e}")
|
|
5474
|
-
raise Exception(f"Failed to convert BaseModel to message: {e}")
|
|
5475
|
-
else:
|
|
5476
|
-
user_msg_content = input
|
|
5477
|
-
if self.add_knowledge_to_context:
|
|
5478
|
-
if isinstance(input, str):
|
|
5479
|
-
user_msg_content = input
|
|
5480
|
-
elif callable(input):
|
|
5481
|
-
user_msg_content = input(agent=self)
|
|
5482
|
-
else:
|
|
5483
|
-
raise Exception("message must be a string or a callable when add_references is True")
|
|
6313
|
+
# If message is provided as a BaseModel, convert it to a Message
|
|
6314
|
+
elif isinstance(input, BaseModel):
|
|
6315
|
+
try:
|
|
6316
|
+
# Create a user message with the BaseModel content
|
|
6317
|
+
content = input.model_dump_json(indent=2, exclude_none=True)
|
|
6318
|
+
return Message(role=self.user_message_role, content=content)
|
|
6319
|
+
except Exception as e:
|
|
6320
|
+
log_warning(f"Failed to convert BaseModel to message: {e}")
|
|
6321
|
+
raise Exception(f"Failed to convert BaseModel to message: {e}")
|
|
6322
|
+
else:
|
|
6323
|
+
user_msg_content = input
|
|
6324
|
+
if self.add_knowledge_to_context:
|
|
6325
|
+
if isinstance(input, str):
|
|
6326
|
+
user_msg_content = input
|
|
6327
|
+
elif callable(input):
|
|
6328
|
+
user_msg_content = input(agent=self)
|
|
6329
|
+
else:
|
|
6330
|
+
raise Exception("message must be a string or a callable when add_references is True")
|
|
6331
|
+
|
|
6332
|
+
try:
|
|
6333
|
+
retrieval_timer = Timer()
|
|
6334
|
+
retrieval_timer.start()
|
|
6335
|
+
docs_from_knowledge = self.get_relevant_docs_from_knowledge(
|
|
6336
|
+
query=user_msg_content, filters=knowledge_filters, **kwargs
|
|
6337
|
+
)
|
|
6338
|
+
if docs_from_knowledge is not None:
|
|
6339
|
+
references = MessageReferences(
|
|
6340
|
+
query=user_msg_content,
|
|
6341
|
+
references=docs_from_knowledge,
|
|
6342
|
+
time=round(retrieval_timer.elapsed, 4),
|
|
6343
|
+
)
|
|
6344
|
+
# Add the references to the run_response
|
|
6345
|
+
if run_response.references is None:
|
|
6346
|
+
run_response.references = []
|
|
6347
|
+
run_response.references.append(references)
|
|
6348
|
+
retrieval_timer.stop()
|
|
6349
|
+
log_debug(f"Time to get references: {retrieval_timer.elapsed:.4f}s")
|
|
6350
|
+
except Exception as e:
|
|
6351
|
+
log_warning(f"Failed to get references: {e}")
|
|
6352
|
+
|
|
6353
|
+
if self.resolve_in_context:
|
|
6354
|
+
user_msg_content = self._format_message_with_state_variables(
|
|
6355
|
+
user_msg_content,
|
|
6356
|
+
user_id=user_id,
|
|
6357
|
+
session_state=session_state,
|
|
6358
|
+
dependencies=dependencies,
|
|
6359
|
+
metadata=metadata,
|
|
6360
|
+
)
|
|
6361
|
+
|
|
6362
|
+
# Convert to string for concatenation operations
|
|
6363
|
+
user_msg_content_str = get_text_from_message(user_msg_content) if user_msg_content is not None else ""
|
|
6364
|
+
|
|
6365
|
+
# 4.1 Add knowledge references to user message
|
|
6366
|
+
if (
|
|
6367
|
+
self.add_knowledge_to_context
|
|
6368
|
+
and references is not None
|
|
6369
|
+
and references.references is not None
|
|
6370
|
+
and len(references.references) > 0
|
|
6371
|
+
):
|
|
6372
|
+
user_msg_content_str += "\n\nUse the following references from the knowledge base if it helps:\n"
|
|
6373
|
+
user_msg_content_str += "<references>\n"
|
|
6374
|
+
user_msg_content_str += self._convert_documents_to_string(references.references) + "\n"
|
|
6375
|
+
user_msg_content_str += "</references>"
|
|
6376
|
+
# 4.2 Add context to user message
|
|
6377
|
+
if add_dependencies_to_context and dependencies is not None:
|
|
6378
|
+
user_msg_content_str += "\n\n<additional context>\n"
|
|
6379
|
+
user_msg_content_str += self._convert_dependencies_to_string(dependencies) + "\n"
|
|
6380
|
+
user_msg_content_str += "</additional context>"
|
|
6381
|
+
|
|
6382
|
+
# Use the string version for the final content
|
|
6383
|
+
user_msg_content = user_msg_content_str
|
|
6384
|
+
|
|
6385
|
+
# Return the user message
|
|
6386
|
+
return Message(
|
|
6387
|
+
role=self.user_message_role,
|
|
6388
|
+
content=user_msg_content,
|
|
6389
|
+
audio=None if not self.send_media_to_model else audio,
|
|
6390
|
+
images=None if not self.send_media_to_model else images,
|
|
6391
|
+
videos=None if not self.send_media_to_model else videos,
|
|
6392
|
+
files=None if not self.send_media_to_model else files,
|
|
6393
|
+
**kwargs,
|
|
6394
|
+
)
|
|
6395
|
+
|
|
6396
|
+
def _get_run_messages(
|
|
6397
|
+
self,
|
|
6398
|
+
*,
|
|
6399
|
+
run_response: RunOutput,
|
|
6400
|
+
input: Union[str, List, Dict, Message, BaseModel, List[Message]],
|
|
6401
|
+
session: AgentSession,
|
|
6402
|
+
session_state: Optional[Dict[str, Any]] = None,
|
|
6403
|
+
user_id: Optional[str] = None,
|
|
6404
|
+
audio: Optional[Sequence[Audio]] = None,
|
|
6405
|
+
images: Optional[Sequence[Image]] = None,
|
|
6406
|
+
videos: Optional[Sequence[Video]] = None,
|
|
6407
|
+
files: Optional[Sequence[File]] = None,
|
|
6408
|
+
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
6409
|
+
add_history_to_context: Optional[bool] = None,
|
|
6410
|
+
dependencies: Optional[Dict[str, Any]] = None,
|
|
6411
|
+
add_dependencies_to_context: Optional[bool] = None,
|
|
6412
|
+
add_session_state_to_context: Optional[bool] = None,
|
|
6413
|
+
metadata: Optional[Dict[str, Any]] = None,
|
|
6414
|
+
**kwargs: Any,
|
|
6415
|
+
) -> RunMessages:
|
|
6416
|
+
"""This function returns a RunMessages object with the following attributes:
|
|
6417
|
+
- system_message: The system message for this run
|
|
6418
|
+
- user_message: The user message for this run
|
|
6419
|
+
- messages: List of messages to send to the model
|
|
6420
|
+
|
|
6421
|
+
To build the RunMessages object:
|
|
6422
|
+
1. Add system message to run_messages
|
|
6423
|
+
2. Add extra messages to run_messages if provided
|
|
6424
|
+
3. Add history to run_messages
|
|
6425
|
+
4. Add user message to run_messages (if input is single content)
|
|
6426
|
+
5. Add input messages to run_messages if provided (if input is List[Message])
|
|
6427
|
+
|
|
6428
|
+
Returns:
|
|
6429
|
+
RunMessages object with the following attributes:
|
|
6430
|
+
- system_message: The system message for this run
|
|
6431
|
+
- user_message: The user message for this run
|
|
6432
|
+
- messages: List of all messages to send to the model
|
|
6433
|
+
|
|
6434
|
+
Typical usage:
|
|
6435
|
+
run_messages = self._get_run_messages(
|
|
6436
|
+
input=input, session_id=session_id, user_id=user_id, audio=audio, images=images, videos=videos, files=files, **kwargs
|
|
6437
|
+
)
|
|
6438
|
+
"""
|
|
6439
|
+
|
|
6440
|
+
# Initialize the RunMessages object (no media here - that's in RunInput now)
|
|
6441
|
+
run_messages = RunMessages()
|
|
6442
|
+
|
|
6443
|
+
# 1. Add system message to run_messages
|
|
6444
|
+
system_message = self.get_system_message(
|
|
6445
|
+
session=session,
|
|
6446
|
+
session_state=session_state,
|
|
6447
|
+
user_id=user_id,
|
|
6448
|
+
dependencies=dependencies,
|
|
6449
|
+
metadata=metadata,
|
|
6450
|
+
add_session_state_to_context=add_session_state_to_context,
|
|
6451
|
+
)
|
|
6452
|
+
if system_message is not None:
|
|
6453
|
+
run_messages.system_message = system_message
|
|
6454
|
+
run_messages.messages.append(system_message)
|
|
6455
|
+
|
|
6456
|
+
# 2. Add extra messages to run_messages if provided
|
|
6457
|
+
if self.additional_input is not None:
|
|
6458
|
+
messages_to_add_to_run_response: List[Message] = []
|
|
6459
|
+
if run_messages.extra_messages is None:
|
|
6460
|
+
run_messages.extra_messages = []
|
|
6461
|
+
|
|
6462
|
+
for _m in self.additional_input:
|
|
6463
|
+
if isinstance(_m, Message):
|
|
6464
|
+
messages_to_add_to_run_response.append(_m)
|
|
6465
|
+
run_messages.messages.append(_m)
|
|
6466
|
+
run_messages.extra_messages.append(_m)
|
|
6467
|
+
elif isinstance(_m, dict):
|
|
6468
|
+
try:
|
|
6469
|
+
_m_parsed = Message.model_validate(_m)
|
|
6470
|
+
messages_to_add_to_run_response.append(_m_parsed)
|
|
6471
|
+
run_messages.messages.append(_m_parsed)
|
|
6472
|
+
run_messages.extra_messages.append(_m_parsed)
|
|
6473
|
+
except Exception as e:
|
|
6474
|
+
log_warning(f"Failed to validate message: {e}")
|
|
6475
|
+
# Add the extra messages to the run_response
|
|
6476
|
+
if len(messages_to_add_to_run_response) > 0:
|
|
6477
|
+
log_debug(f"Adding {len(messages_to_add_to_run_response)} extra messages")
|
|
6478
|
+
if run_response.additional_input is None:
|
|
6479
|
+
run_response.additional_input = messages_to_add_to_run_response
|
|
6480
|
+
else:
|
|
6481
|
+
run_response.additional_input.extend(messages_to_add_to_run_response)
|
|
6482
|
+
|
|
6483
|
+
# 3. Add history to run_messages
|
|
6484
|
+
if add_history_to_context:
|
|
6485
|
+
from copy import deepcopy
|
|
6486
|
+
|
|
6487
|
+
# Only skip messages from history when system_message_role is NOT a standard conversation role.
|
|
6488
|
+
# Standard conversation roles ("user", "assistant", "tool") should never be filtered
|
|
6489
|
+
# to preserve conversation continuity.
|
|
6490
|
+
skip_role = (
|
|
6491
|
+
self.system_message_role if self.system_message_role not in ["user", "assistant", "tool"] else None
|
|
6492
|
+
)
|
|
6493
|
+
|
|
6494
|
+
history: List[Message] = session.get_messages_from_last_n_runs(
|
|
6495
|
+
last_n=self.num_history_runs,
|
|
6496
|
+
skip_role=skip_role,
|
|
6497
|
+
agent_id=self.id if self.team_id is not None else None,
|
|
6498
|
+
)
|
|
6499
|
+
|
|
6500
|
+
if len(history) > 0:
|
|
6501
|
+
# Create a deep copy of the history messages to avoid modifying the original messages
|
|
6502
|
+
history_copy = [deepcopy(msg) for msg in history]
|
|
6503
|
+
|
|
6504
|
+
# Tag each message as coming from history
|
|
6505
|
+
for _msg in history_copy:
|
|
6506
|
+
_msg.from_history = True
|
|
6507
|
+
|
|
6508
|
+
log_debug(f"Adding {len(history_copy)} messages from history")
|
|
6509
|
+
|
|
6510
|
+
run_messages.messages += history_copy
|
|
6511
|
+
|
|
6512
|
+
# 4. Add user message to run_messages
|
|
6513
|
+
user_message: Optional[Message] = None
|
|
6514
|
+
|
|
6515
|
+
# 4.1 Build user message if input is None, str or list and not a list of Message/dict objects
|
|
6516
|
+
if (
|
|
6517
|
+
input is None
|
|
6518
|
+
or isinstance(input, str)
|
|
6519
|
+
or (
|
|
6520
|
+
isinstance(input, list)
|
|
6521
|
+
and not (
|
|
6522
|
+
len(input) > 0
|
|
6523
|
+
and (isinstance(input[0], Message) or (isinstance(input[0], dict) and "role" in input[0]))
|
|
6524
|
+
)
|
|
6525
|
+
)
|
|
6526
|
+
):
|
|
6527
|
+
user_message = self._get_user_message(
|
|
6528
|
+
run_response=run_response,
|
|
6529
|
+
session_state=session_state,
|
|
6530
|
+
input=input,
|
|
6531
|
+
audio=audio,
|
|
6532
|
+
images=images,
|
|
6533
|
+
videos=videos,
|
|
6534
|
+
files=files,
|
|
6535
|
+
knowledge_filters=knowledge_filters,
|
|
6536
|
+
dependencies=dependencies,
|
|
6537
|
+
add_dependencies_to_context=add_dependencies_to_context,
|
|
6538
|
+
metadata=metadata,
|
|
6539
|
+
**kwargs,
|
|
6540
|
+
)
|
|
6541
|
+
|
|
6542
|
+
# 4.2 If input is provided as a Message, use it directly
|
|
6543
|
+
elif isinstance(input, Message):
|
|
6544
|
+
user_message = input
|
|
5484
6545
|
|
|
5485
|
-
|
|
5486
|
-
|
|
5487
|
-
|
|
5488
|
-
|
|
5489
|
-
|
|
5490
|
-
)
|
|
5491
|
-
if docs_from_knowledge is not None:
|
|
5492
|
-
references = MessageReferences(
|
|
5493
|
-
query=user_msg_content,
|
|
5494
|
-
references=docs_from_knowledge,
|
|
5495
|
-
time=round(retrieval_timer.elapsed, 4),
|
|
5496
|
-
)
|
|
5497
|
-
# Add the references to the run_response
|
|
5498
|
-
if run_response.references is None:
|
|
5499
|
-
run_response.references = []
|
|
5500
|
-
run_response.references.append(references)
|
|
5501
|
-
retrieval_timer.stop()
|
|
5502
|
-
log_debug(f"Time to get references: {retrieval_timer.elapsed:.4f}s")
|
|
5503
|
-
except Exception as e:
|
|
5504
|
-
log_warning(f"Failed to get references: {e}")
|
|
6546
|
+
# 4.3 If input is provided as a dict, try to validate it as a Message
|
|
6547
|
+
elif isinstance(input, dict):
|
|
6548
|
+
try:
|
|
6549
|
+
if self.input_schema and is_typed_dict(self.input_schema):
|
|
6550
|
+
import json
|
|
5505
6551
|
|
|
5506
|
-
|
|
5507
|
-
|
|
5508
|
-
|
|
5509
|
-
|
|
5510
|
-
|
|
5511
|
-
|
|
5512
|
-
metadata=metadata,
|
|
5513
|
-
)
|
|
6552
|
+
content = json.dumps(input, indent=2, ensure_ascii=False)
|
|
6553
|
+
user_message = Message(role=self.user_message_role, content=content)
|
|
6554
|
+
else:
|
|
6555
|
+
user_message = Message.model_validate(input)
|
|
6556
|
+
except Exception as e:
|
|
6557
|
+
log_warning(f"Failed to validate message: {e}")
|
|
5514
6558
|
|
|
5515
|
-
|
|
5516
|
-
|
|
6559
|
+
# 4.4 If input is provided as a BaseModel, convert it to a Message
|
|
6560
|
+
elif isinstance(input, BaseModel):
|
|
6561
|
+
try:
|
|
6562
|
+
# Create a user message with the BaseModel content
|
|
6563
|
+
content = input.model_dump_json(indent=2, exclude_none=True)
|
|
6564
|
+
user_message = Message(role=self.user_message_role, content=content)
|
|
6565
|
+
except Exception as e:
|
|
6566
|
+
log_warning(f"Failed to convert BaseModel to message: {e}")
|
|
5517
6567
|
|
|
5518
|
-
|
|
5519
|
-
|
|
5520
|
-
|
|
5521
|
-
|
|
5522
|
-
|
|
5523
|
-
|
|
5524
|
-
|
|
5525
|
-
|
|
5526
|
-
|
|
5527
|
-
|
|
5528
|
-
|
|
5529
|
-
|
|
5530
|
-
|
|
5531
|
-
|
|
5532
|
-
|
|
5533
|
-
|
|
6568
|
+
# 5. Add input messages to run_messages if provided (List[Message] or List[Dict])
|
|
6569
|
+
if (
|
|
6570
|
+
isinstance(input, list)
|
|
6571
|
+
and len(input) > 0
|
|
6572
|
+
and (isinstance(input[0], Message) or (isinstance(input[0], dict) and "role" in input[0]))
|
|
6573
|
+
):
|
|
6574
|
+
for _m in input:
|
|
6575
|
+
if isinstance(_m, Message):
|
|
6576
|
+
run_messages.messages.append(_m)
|
|
6577
|
+
if run_messages.extra_messages is None:
|
|
6578
|
+
run_messages.extra_messages = []
|
|
6579
|
+
run_messages.extra_messages.append(_m)
|
|
6580
|
+
elif isinstance(_m, dict):
|
|
6581
|
+
try:
|
|
6582
|
+
msg = Message.model_validate(_m)
|
|
6583
|
+
run_messages.messages.append(msg)
|
|
6584
|
+
if run_messages.extra_messages is None:
|
|
6585
|
+
run_messages.extra_messages = []
|
|
6586
|
+
run_messages.extra_messages.append(msg)
|
|
6587
|
+
except Exception as e:
|
|
6588
|
+
log_warning(f"Failed to validate message: {e}")
|
|
5534
6589
|
|
|
5535
|
-
|
|
5536
|
-
|
|
6590
|
+
# Add user message to run_messages
|
|
6591
|
+
if user_message is not None:
|
|
6592
|
+
run_messages.user_message = user_message
|
|
6593
|
+
run_messages.messages.append(user_message)
|
|
5537
6594
|
|
|
5538
|
-
|
|
5539
|
-
return Message(
|
|
5540
|
-
role=self.user_message_role,
|
|
5541
|
-
content=user_msg_content,
|
|
5542
|
-
audio=None if not self.send_media_to_model else audio,
|
|
5543
|
-
images=None if not self.send_media_to_model else images,
|
|
5544
|
-
videos=None if not self.send_media_to_model else videos,
|
|
5545
|
-
files=None if not self.send_media_to_model else files,
|
|
5546
|
-
**kwargs,
|
|
5547
|
-
)
|
|
6595
|
+
return run_messages
|
|
5548
6596
|
|
|
5549
|
-
def
|
|
6597
|
+
async def _aget_run_messages(
|
|
5550
6598
|
self,
|
|
5551
6599
|
*,
|
|
5552
6600
|
run_response: RunOutput,
|
|
@@ -5560,7 +6608,7 @@ class Agent:
|
|
|
5560
6608
|
files: Optional[Sequence[File]] = None,
|
|
5561
6609
|
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
5562
6610
|
add_history_to_context: Optional[bool] = None,
|
|
5563
|
-
|
|
6611
|
+
run_dependencies: Optional[Dict[str, Any]] = None,
|
|
5564
6612
|
add_dependencies_to_context: Optional[bool] = None,
|
|
5565
6613
|
add_session_state_to_context: Optional[bool] = None,
|
|
5566
6614
|
metadata: Optional[Dict[str, Any]] = None,
|
|
@@ -5594,13 +6642,12 @@ class Agent:
|
|
|
5594
6642
|
run_messages = RunMessages()
|
|
5595
6643
|
|
|
5596
6644
|
# 1. Add system message to run_messages
|
|
5597
|
-
system_message = self.
|
|
6645
|
+
system_message = await self.aget_system_message(
|
|
5598
6646
|
session=session,
|
|
5599
6647
|
session_state=session_state,
|
|
5600
6648
|
user_id=user_id,
|
|
5601
|
-
dependencies=
|
|
6649
|
+
dependencies=run_dependencies,
|
|
5602
6650
|
metadata=metadata,
|
|
5603
|
-
add_session_state_to_context=add_session_state_to_context,
|
|
5604
6651
|
)
|
|
5605
6652
|
if system_message is not None:
|
|
5606
6653
|
run_messages.system_message = system_message
|
|
@@ -5637,16 +6684,9 @@ class Agent:
|
|
|
5637
6684
|
if add_history_to_context:
|
|
5638
6685
|
from copy import deepcopy
|
|
5639
6686
|
|
|
5640
|
-
# Only skip messages from history when system_message_role is NOT a standard conversation role.
|
|
5641
|
-
# Standard conversation roles ("user", "assistant", "tool") should never be filtered
|
|
5642
|
-
# to preserve conversation continuity.
|
|
5643
|
-
skip_role = (
|
|
5644
|
-
self.system_message_role if self.system_message_role not in ["user", "assistant", "tool"] else None
|
|
5645
|
-
)
|
|
5646
|
-
|
|
5647
6687
|
history: List[Message] = session.get_messages_from_last_n_runs(
|
|
5648
6688
|
last_n=self.num_history_runs,
|
|
5649
|
-
skip_role=
|
|
6689
|
+
skip_role=self.system_message_role,
|
|
5650
6690
|
agent_id=self.id if self.team_id is not None else None,
|
|
5651
6691
|
)
|
|
5652
6692
|
|
|
@@ -5686,7 +6726,7 @@ class Agent:
|
|
|
5686
6726
|
videos=videos,
|
|
5687
6727
|
files=files,
|
|
5688
6728
|
knowledge_filters=knowledge_filters,
|
|
5689
|
-
|
|
6729
|
+
run_dependencies=run_dependencies,
|
|
5690
6730
|
add_dependencies_to_context=add_dependencies_to_context,
|
|
5691
6731
|
metadata=metadata,
|
|
5692
6732
|
**kwargs,
|
|
@@ -5699,13 +6739,7 @@ class Agent:
|
|
|
5699
6739
|
# 4.3 If input is provided as a dict, try to validate it as a Message
|
|
5700
6740
|
elif isinstance(input, dict):
|
|
5701
6741
|
try:
|
|
5702
|
-
|
|
5703
|
-
import json
|
|
5704
|
-
|
|
5705
|
-
content = json.dumps(input, indent=2, ensure_ascii=False)
|
|
5706
|
-
user_message = Message(role=self.user_message_role, content=content)
|
|
5707
|
-
else:
|
|
5708
|
-
user_message = Message.model_validate(input)
|
|
6742
|
+
user_message = Message.model_validate(input)
|
|
5709
6743
|
except Exception as e:
|
|
5710
6744
|
log_warning(f"Failed to validate message: {e}")
|
|
5711
6745
|
|
|
@@ -6264,12 +7298,15 @@ class Agent:
|
|
|
6264
7298
|
|
|
6265
7299
|
# If a reasoning model is provided, use it to generate reasoning
|
|
6266
7300
|
if reasoning_model_provided:
|
|
7301
|
+
from agno.reasoning.anthropic import is_anthropic_reasoning_model
|
|
6267
7302
|
from agno.reasoning.azure_ai_foundry import is_ai_foundry_reasoning_model
|
|
6268
7303
|
from agno.reasoning.deepseek import is_deepseek_reasoning_model
|
|
7304
|
+
from agno.reasoning.gemini import is_gemini_reasoning_model
|
|
6269
7305
|
from agno.reasoning.groq import is_groq_reasoning_model
|
|
6270
7306
|
from agno.reasoning.helpers import get_reasoning_agent
|
|
6271
7307
|
from agno.reasoning.ollama import is_ollama_reasoning_model
|
|
6272
7308
|
from agno.reasoning.openai import is_openai_reasoning_model
|
|
7309
|
+
from agno.reasoning.vertexai import is_vertexai_reasoning_model
|
|
6273
7310
|
|
|
6274
7311
|
reasoning_agent = self.reasoning_agent or get_reasoning_agent(
|
|
6275
7312
|
reasoning_model=reasoning_model,
|
|
@@ -6285,8 +7322,20 @@ class Agent:
|
|
|
6285
7322
|
is_openai = is_openai_reasoning_model(reasoning_model)
|
|
6286
7323
|
is_ollama = is_ollama_reasoning_model(reasoning_model)
|
|
6287
7324
|
is_ai_foundry = is_ai_foundry_reasoning_model(reasoning_model)
|
|
7325
|
+
is_gemini = is_gemini_reasoning_model(reasoning_model)
|
|
7326
|
+
is_anthropic = is_anthropic_reasoning_model(reasoning_model)
|
|
7327
|
+
is_vertexai = is_vertexai_reasoning_model(reasoning_model)
|
|
6288
7328
|
|
|
6289
|
-
if
|
|
7329
|
+
if (
|
|
7330
|
+
is_deepseek
|
|
7331
|
+
or is_groq
|
|
7332
|
+
or is_openai
|
|
7333
|
+
or is_ollama
|
|
7334
|
+
or is_ai_foundry
|
|
7335
|
+
or is_gemini
|
|
7336
|
+
or is_anthropic
|
|
7337
|
+
or is_vertexai
|
|
7338
|
+
):
|
|
6290
7339
|
reasoning_message: Optional[Message] = None
|
|
6291
7340
|
if is_deepseek:
|
|
6292
7341
|
from agno.reasoning.deepseek import get_deepseek_reasoning
|
|
@@ -6323,6 +7372,27 @@ class Agent:
|
|
|
6323
7372
|
reasoning_message = get_ai_foundry_reasoning(
|
|
6324
7373
|
reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
|
|
6325
7374
|
)
|
|
7375
|
+
elif is_gemini:
|
|
7376
|
+
from agno.reasoning.gemini import get_gemini_reasoning
|
|
7377
|
+
|
|
7378
|
+
log_debug("Starting Gemini Reasoning", center=True, symbol="=")
|
|
7379
|
+
reasoning_message = get_gemini_reasoning(
|
|
7380
|
+
reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
|
|
7381
|
+
)
|
|
7382
|
+
elif is_anthropic:
|
|
7383
|
+
from agno.reasoning.anthropic import get_anthropic_reasoning
|
|
7384
|
+
|
|
7385
|
+
log_debug("Starting Anthropic Claude Reasoning", center=True, symbol="=")
|
|
7386
|
+
reasoning_message = get_anthropic_reasoning(
|
|
7387
|
+
reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
|
|
7388
|
+
)
|
|
7389
|
+
elif is_vertexai:
|
|
7390
|
+
from agno.reasoning.vertexai import get_vertexai_reasoning
|
|
7391
|
+
|
|
7392
|
+
log_debug("Starting VertexAI Reasoning", center=True, symbol="=")
|
|
7393
|
+
reasoning_message = get_vertexai_reasoning(
|
|
7394
|
+
reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
|
|
7395
|
+
)
|
|
6326
7396
|
|
|
6327
7397
|
if reasoning_message is None:
|
|
6328
7398
|
log_warning("Reasoning error. Reasoning response is None, continuing regular session...")
|
|
@@ -6496,12 +7566,15 @@ class Agent:
|
|
|
6496
7566
|
|
|
6497
7567
|
# If a reasoning model is provided, use it to generate reasoning
|
|
6498
7568
|
if reasoning_model_provided:
|
|
7569
|
+
from agno.reasoning.anthropic import is_anthropic_reasoning_model
|
|
6499
7570
|
from agno.reasoning.azure_ai_foundry import is_ai_foundry_reasoning_model
|
|
6500
7571
|
from agno.reasoning.deepseek import is_deepseek_reasoning_model
|
|
7572
|
+
from agno.reasoning.gemini import is_gemini_reasoning_model
|
|
6501
7573
|
from agno.reasoning.groq import is_groq_reasoning_model
|
|
6502
7574
|
from agno.reasoning.helpers import get_reasoning_agent
|
|
6503
7575
|
from agno.reasoning.ollama import is_ollama_reasoning_model
|
|
6504
7576
|
from agno.reasoning.openai import is_openai_reasoning_model
|
|
7577
|
+
from agno.reasoning.vertexai import is_vertexai_reasoning_model
|
|
6505
7578
|
|
|
6506
7579
|
reasoning_agent = self.reasoning_agent or get_reasoning_agent(
|
|
6507
7580
|
reasoning_model=reasoning_model,
|
|
@@ -6517,8 +7590,20 @@ class Agent:
|
|
|
6517
7590
|
is_openai = is_openai_reasoning_model(reasoning_model)
|
|
6518
7591
|
is_ollama = is_ollama_reasoning_model(reasoning_model)
|
|
6519
7592
|
is_ai_foundry = is_ai_foundry_reasoning_model(reasoning_model)
|
|
7593
|
+
is_gemini = is_gemini_reasoning_model(reasoning_model)
|
|
7594
|
+
is_anthropic = is_anthropic_reasoning_model(reasoning_model)
|
|
7595
|
+
is_vertexai = is_vertexai_reasoning_model(reasoning_model)
|
|
6520
7596
|
|
|
6521
|
-
if
|
|
7597
|
+
if (
|
|
7598
|
+
is_deepseek
|
|
7599
|
+
or is_groq
|
|
7600
|
+
or is_openai
|
|
7601
|
+
or is_ollama
|
|
7602
|
+
or is_ai_foundry
|
|
7603
|
+
or is_gemini
|
|
7604
|
+
or is_anthropic
|
|
7605
|
+
or is_vertexai
|
|
7606
|
+
):
|
|
6522
7607
|
reasoning_message: Optional[Message] = None
|
|
6523
7608
|
if is_deepseek:
|
|
6524
7609
|
from agno.reasoning.deepseek import aget_deepseek_reasoning
|
|
@@ -6555,6 +7640,27 @@ class Agent:
|
|
|
6555
7640
|
reasoning_message = get_ai_foundry_reasoning(
|
|
6556
7641
|
reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
|
|
6557
7642
|
)
|
|
7643
|
+
elif is_gemini:
|
|
7644
|
+
from agno.reasoning.gemini import aget_gemini_reasoning
|
|
7645
|
+
|
|
7646
|
+
log_debug("Starting Gemini Reasoning", center=True, symbol="=")
|
|
7647
|
+
reasoning_message = await aget_gemini_reasoning(
|
|
7648
|
+
reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
|
|
7649
|
+
)
|
|
7650
|
+
elif is_anthropic:
|
|
7651
|
+
from agno.reasoning.anthropic import aget_anthropic_reasoning
|
|
7652
|
+
|
|
7653
|
+
log_debug("Starting Anthropic Claude Reasoning", center=True, symbol="=")
|
|
7654
|
+
reasoning_message = await aget_anthropic_reasoning(
|
|
7655
|
+
reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
|
|
7656
|
+
)
|
|
7657
|
+
elif is_vertexai:
|
|
7658
|
+
from agno.reasoning.vertexai import aget_vertexai_reasoning
|
|
7659
|
+
|
|
7660
|
+
log_debug("Starting VertexAI Reasoning", center=True, symbol="=")
|
|
7661
|
+
reasoning_message = await aget_vertexai_reasoning(
|
|
7662
|
+
reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
|
|
7663
|
+
)
|
|
6558
7664
|
|
|
6559
7665
|
if reasoning_message is None:
|
|
6560
7666
|
log_warning("Reasoning error. Reasoning response is None, continuing regular session...")
|
|
@@ -7284,6 +8390,8 @@ class Agent:
|
|
|
7284
8390
|
if self.db is None:
|
|
7285
8391
|
return "Previous session messages not available"
|
|
7286
8392
|
|
|
8393
|
+
self.db = cast(BaseDb, self.db)
|
|
8394
|
+
|
|
7287
8395
|
selected_sessions = self.db.get_sessions(
|
|
7288
8396
|
session_type=SessionType.AGENT, limit=num_history_sessions, user_id=user_id
|
|
7289
8397
|
)
|
|
@@ -7321,6 +8429,69 @@ class Agent:
|
|
|
7321
8429
|
|
|
7322
8430
|
return get_previous_session_messages
|
|
7323
8431
|
|
|
8432
|
+
async def _aget_previous_sessions_messages_function(self, num_history_sessions: Optional[int] = 2) -> Callable:
|
|
8433
|
+
"""Factory function to create a get_previous_session_messages function.
|
|
8434
|
+
|
|
8435
|
+
Args:
|
|
8436
|
+
num_history_sessions: The last n sessions to be taken from db
|
|
8437
|
+
|
|
8438
|
+
Returns:
|
|
8439
|
+
Callable: A function that retrieves messages from previous sessions
|
|
8440
|
+
"""
|
|
8441
|
+
|
|
8442
|
+
async def aget_previous_session_messages() -> str:
|
|
8443
|
+
"""Use this function to retrieve messages from previous chat sessions.
|
|
8444
|
+
USE THIS TOOL ONLY WHEN THE QUESTION IS EITHER "What was my last conversation?" or "What was my last question?" and similar to it.
|
|
8445
|
+
|
|
8446
|
+
Returns:
|
|
8447
|
+
str: JSON formatted list of message pairs from previous sessions
|
|
8448
|
+
"""
|
|
8449
|
+
# TODO: Review and Test this function
|
|
8450
|
+
import json
|
|
8451
|
+
|
|
8452
|
+
if self.db is None:
|
|
8453
|
+
return "Previous session messages not available"
|
|
8454
|
+
|
|
8455
|
+
if isinstance(self.db, AsyncBaseDb):
|
|
8456
|
+
selected_sessions = await self.db.get_sessions(
|
|
8457
|
+
session_type=SessionType.AGENT, limit=num_history_sessions
|
|
8458
|
+
)
|
|
8459
|
+
else:
|
|
8460
|
+
selected_sessions = self.db.get_sessions(session_type=SessionType.AGENT, limit=num_history_sessions)
|
|
8461
|
+
|
|
8462
|
+
all_messages = []
|
|
8463
|
+
seen_message_pairs = set()
|
|
8464
|
+
|
|
8465
|
+
for session in selected_sessions:
|
|
8466
|
+
if isinstance(session, AgentSession) and session.runs:
|
|
8467
|
+
message_count = 0
|
|
8468
|
+
for run in session.runs:
|
|
8469
|
+
messages = run.messages
|
|
8470
|
+
if messages is not None:
|
|
8471
|
+
for i in range(0, len(messages) - 1, 2):
|
|
8472
|
+
if i + 1 < len(messages):
|
|
8473
|
+
try:
|
|
8474
|
+
user_msg = messages[i]
|
|
8475
|
+
assistant_msg = messages[i + 1]
|
|
8476
|
+
user_content = user_msg.content
|
|
8477
|
+
assistant_content = assistant_msg.content
|
|
8478
|
+
if user_content is None or assistant_content is None:
|
|
8479
|
+
continue # Skip this pair if either message has no content
|
|
8480
|
+
|
|
8481
|
+
msg_pair_id = f"{user_content}:{assistant_content}"
|
|
8482
|
+
if msg_pair_id not in seen_message_pairs:
|
|
8483
|
+
seen_message_pairs.add(msg_pair_id)
|
|
8484
|
+
all_messages.append(Message.model_validate(user_msg))
|
|
8485
|
+
all_messages.append(Message.model_validate(assistant_msg))
|
|
8486
|
+
message_count += 1
|
|
8487
|
+
except Exception as e:
|
|
8488
|
+
log_warning(f"Error processing message pair: {e}")
|
|
8489
|
+
continue
|
|
8490
|
+
|
|
8491
|
+
return json.dumps([msg.to_dict() for msg in all_messages]) if all_messages else "No history found"
|
|
8492
|
+
|
|
8493
|
+
return aget_previous_session_messages
|
|
8494
|
+
|
|
7324
8495
|
###########################################################################
|
|
7325
8496
|
# Print Response
|
|
7326
8497
|
###########################################################################
|
|
@@ -7354,6 +8525,11 @@ class Agent:
|
|
|
7354
8525
|
tags_to_include_in_markdown: Optional[Set[str]] = None,
|
|
7355
8526
|
**kwargs: Any,
|
|
7356
8527
|
) -> None:
|
|
8528
|
+
if self._has_async_db():
|
|
8529
|
+
raise Exception(
|
|
8530
|
+
"This method is not supported with an async DB. Please use the async version of this method."
|
|
8531
|
+
)
|
|
8532
|
+
|
|
7357
8533
|
if not tags_to_include_in_markdown:
|
|
7358
8534
|
tags_to_include_in_markdown = {"think", "thinking"}
|
|
7359
8535
|
|
|
@@ -7655,8 +8831,6 @@ class Agent:
|
|
|
7655
8831
|
run_response.input.audios = []
|
|
7656
8832
|
run_response.input.files = []
|
|
7657
8833
|
|
|
7658
|
-
# 2. RunOutput artifact media are skipped since we don't store them when store_media=False
|
|
7659
|
-
|
|
7660
8834
|
# 3. Scrub media from all messages
|
|
7661
8835
|
if run_response.messages:
|
|
7662
8836
|
for message in run_response.messages:
|
|
@@ -7685,6 +8859,69 @@ class Agent:
|
|
|
7685
8859
|
message.image_output = None
|
|
7686
8860
|
message.video_output = None
|
|
7687
8861
|
|
|
8862
|
+
def _scrub_tool_results_from_run_output(self, run_response: RunOutput) -> None:
    """
    Strip every tool interaction from the RunOutput when store_tool_messages=False.

    Both halves of each interaction are dropped — the tool result message
    (role="tool") and the assistant message that issued the matching tool
    call — so the persisted conversation stays internally consistent.
    """
    messages = run_response.messages
    if not messages:
        return

    # Ids of every tool result present in this run; the assistant messages
    # that issued these calls must go as well.
    scrubbed_ids = {m.tool_call_id for m in messages if m.role == "tool" and m.tool_call_id}

    def _keep(msg) -> bool:
        # Tool result messages are always removed.
        if msg.role == "tool":
            return False
        # Assistant messages are removed when any of their tool calls
        # correspond to a scrubbed result.
        if msg.role == "assistant" and msg.tool_calls:
            return not any(tc.get("id") in scrubbed_ids for tc in msg.tool_calls)
        return True

    run_response.messages = [m for m in messages if _keep(m)]
|
|
8894
|
+
|
|
8895
|
+
def _scrub_history_messages_from_run_output(self, run_response: RunOutput) -> None:
    """
    Drop history-sourced messages from the RunOutput when store_history_messages=False.

    Messages flagged with from_history=True were injected from the agent's
    memory rather than produced during this run, so they are filtered out
    before the run is persisted.
    """
    messages = run_response.messages
    if not messages:
        return
    run_response.messages = [message for message in messages if not message.from_history]
|
8903
|
+
|
|
8904
|
+
def _scrub_run_output_for_storage(self, run_response: RunOutput) -> bool:
    """
    Apply all storage-flag scrubbing to a RunOutput before persisting it.

    Each disabled store_* flag triggers its corresponding scrubber, in the
    fixed order: media, tool messages, history messages.

    Returns:
        True if at least one scrubber ran, False otherwise.
    """
    # Pair every storage flag with the scrubber that enforces it.
    scrubbers = (
        (self.store_media, self._scrub_media_from_run_output),
        (self.store_tool_messages, self._scrub_tool_results_from_run_output),
        (self.store_history_messages, self._scrub_history_messages_from_run_output),
    )

    scrubbed = False
    for enabled, scrub in scrubbers:
        if not enabled:
            scrub(run_response)
            scrubbed = True
    return scrubbed
|
|
8924
|
+
|
|
7688
8925
|
def _validate_media_object_id(
|
|
7689
8926
|
self,
|
|
7690
8927
|
images: Optional[Sequence[Image]] = None,
|