agno 2.0.3__py3-none-any.whl → 2.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. agno/agent/agent.py +229 -164
  2. agno/db/dynamo/dynamo.py +8 -0
  3. agno/db/firestore/firestore.py +8 -0
  4. agno/db/gcs_json/gcs_json_db.py +9 -0
  5. agno/db/json/json_db.py +8 -0
  6. agno/db/migrations/v1_to_v2.py +191 -23
  7. agno/db/mongo/mongo.py +68 -0
  8. agno/db/mysql/mysql.py +13 -3
  9. agno/db/mysql/schemas.py +27 -27
  10. agno/db/postgres/postgres.py +19 -11
  11. agno/db/redis/redis.py +6 -0
  12. agno/db/singlestore/schemas.py +1 -1
  13. agno/db/singlestore/singlestore.py +8 -1
  14. agno/db/sqlite/sqlite.py +12 -3
  15. agno/integrations/discord/client.py +1 -0
  16. agno/knowledge/knowledge.py +92 -66
  17. agno/knowledge/reader/reader_factory.py +7 -3
  18. agno/knowledge/reader/web_search_reader.py +12 -6
  19. agno/models/base.py +2 -2
  20. agno/models/message.py +109 -0
  21. agno/models/openai/chat.py +3 -0
  22. agno/models/openai/responses.py +12 -0
  23. agno/models/response.py +5 -0
  24. agno/models/siliconflow/__init__.py +5 -0
  25. agno/models/siliconflow/siliconflow.py +25 -0
  26. agno/os/app.py +164 -41
  27. agno/os/auth.py +24 -14
  28. agno/os/interfaces/agui/utils.py +98 -134
  29. agno/os/router.py +128 -55
  30. agno/os/routers/evals/utils.py +9 -9
  31. agno/os/routers/health.py +25 -0
  32. agno/os/routers/home.py +52 -0
  33. agno/os/routers/knowledge/knowledge.py +11 -11
  34. agno/os/routers/session/session.py +24 -8
  35. agno/os/schema.py +29 -2
  36. agno/os/utils.py +0 -8
  37. agno/run/agent.py +3 -3
  38. agno/run/team.py +3 -3
  39. agno/run/workflow.py +64 -10
  40. agno/session/team.py +1 -0
  41. agno/team/team.py +189 -94
  42. agno/tools/duckduckgo.py +15 -11
  43. agno/tools/googlesearch.py +1 -1
  44. agno/tools/mem0.py +11 -17
  45. agno/tools/memory.py +34 -6
  46. agno/utils/common.py +90 -1
  47. agno/utils/streamlit.py +14 -8
  48. agno/utils/string.py +32 -0
  49. agno/utils/tools.py +1 -1
  50. agno/vectordb/chroma/chromadb.py +8 -2
  51. agno/workflow/step.py +115 -16
  52. agno/workflow/workflow.py +16 -13
  53. {agno-2.0.3.dist-info → agno-2.0.5.dist-info}/METADATA +6 -5
  54. {agno-2.0.3.dist-info → agno-2.0.5.dist-info}/RECORD +57 -54
  55. agno/knowledge/reader/url_reader.py +0 -128
  56. {agno-2.0.3.dist-info → agno-2.0.5.dist-info}/WHEEL +0 -0
  57. {agno-2.0.3.dist-info → agno-2.0.5.dist-info}/licenses/LICENSE +0 -0
  58. {agno-2.0.3.dist-info → agno-2.0.5.dist-info}/top_level.txt +0 -0
agno/agent/agent.py CHANGED
@@ -57,6 +57,7 @@ from agno.run.team import TeamRunOutputEvent
57
57
  from agno.session import AgentSession, SessionSummaryManager
58
58
  from agno.tools import Toolkit
59
59
  from agno.tools.function import Function
60
+ from agno.utils.common import is_typed_dict, validate_typed_dict
60
61
  from agno.utils.events import (
61
62
  create_memory_update_completed_event,
62
63
  create_memory_update_started_event,
@@ -106,7 +107,7 @@ from agno.utils.response import (
106
107
  get_paused_content,
107
108
  )
108
109
  from agno.utils.safe_formatter import SafeFormatter
109
- from agno.utils.string import parse_response_model_str
110
+ from agno.utils.string import generate_id_from_name, parse_response_model_str
110
111
  from agno.utils.timer import Timer
111
112
 
112
113
 
@@ -281,7 +282,7 @@ class Agent:
281
282
 
282
283
  # --- Agent Response Model Settings ---
283
284
  # Provide an input schema to validate the input
284
- input_schema: Optional[Type[BaseModel]] = None
285
+ input_schema: Optional[Union[Type[BaseModel], type]] = None
285
286
  # Provide a response model to get the response as a Pydantic model
286
287
  output_schema: Optional[Type[BaseModel]] = None
287
288
  # Provide a secondary model to parse the response from the primary model
@@ -398,13 +399,14 @@ class Agent:
398
399
  timezone_identifier: Optional[str] = None,
399
400
  resolve_in_context: bool = True,
400
401
  additional_input: Optional[List[Union[str, Dict, BaseModel, Message]]] = None,
402
+ user_message_role: str = "user",
401
403
  build_user_context: bool = True,
402
404
  retries: int = 0,
403
405
  delay_between_retries: int = 1,
404
406
  exponential_backoff: bool = False,
405
407
  parser_model: Optional[Model] = None,
406
408
  parser_model_prompt: Optional[str] = None,
407
- input_schema: Optional[Type[BaseModel]] = None,
409
+ input_schema: Optional[Union[Type[BaseModel], type]] = None,
408
410
  output_schema: Optional[Type[BaseModel]] = None,
409
411
  parse_response: bool = True,
410
412
  output_model: Optional[Model] = None,
@@ -495,7 +497,7 @@ class Agent:
495
497
  self.timezone_identifier = timezone_identifier
496
498
  self.resolve_in_context = resolve_in_context
497
499
  self.additional_input = additional_input
498
-
500
+ self.user_message_role = user_message_role
499
501
  self.build_user_context = build_user_context
500
502
 
501
503
  self.retries = retries
@@ -543,10 +545,7 @@ class Agent:
543
545
 
544
546
  def set_id(self) -> None:
545
547
  if self.id is None:
546
- if self.name is not None:
547
- self.id = self.name.lower().replace(" ", "-")
548
- else:
549
- self.id = str(uuid4())
548
+ self.id = generate_id_from_name(self.name)
550
549
 
551
550
  def _set_debug(self, debug_mode: Optional[bool] = None) -> None:
552
551
  # If the default debug mode is set, or passed on run, or via environment variable, set the debug mode to True
@@ -602,8 +601,6 @@ class Agent:
602
601
  if isinstance(input, BaseModel):
603
602
  if isinstance(input, self.input_schema):
604
603
  try:
605
- # Re-validate to catch any field validation errors
606
- input.model_validate(input.model_dump())
607
604
  return input
608
605
  except Exception as e:
609
606
  raise ValueError(f"BaseModel validation failed: {str(e)}")
@@ -614,8 +611,13 @@ class Agent:
614
611
  # Case 2: Message is a dict
615
612
  elif isinstance(input, dict):
616
613
  try:
617
- validated_model = self.input_schema(**input)
618
- return validated_model
614
+ # Check if the schema is a TypedDict
615
+ if is_typed_dict(self.input_schema):
616
+ validated_dict = validate_typed_dict(input, self.input_schema)
617
+ return validated_dict
618
+ else:
619
+ validated_model = self.input_schema(**input)
620
+ return validated_model
619
621
  except Exception as e:
620
622
  raise ValueError(f"Failed to parse dict into {self.input_schema.__name__}: {str(e)}")
621
623
 
@@ -785,14 +787,7 @@ class Agent:
785
787
  run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
786
788
  )
787
789
 
788
- # 4. Update Agent Memory
789
- response_iterator = self._make_memories_and_summaries(
790
- run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
791
- )
792
- # Consume the response iterator to ensure the memory is updated before the run is completed
793
- deque(response_iterator, maxlen=0)
794
-
795
- # 5. Calculate session metrics
790
+ # 4. Calculate session metrics
796
791
  self._update_session_metrics(session=session, run_response=run_response)
797
792
 
798
793
  run_response.status = RunStatus.completed
@@ -804,14 +799,21 @@ class Agent:
804
799
  if run_response.metrics:
805
800
  run_response.metrics.stop_timer()
806
801
 
807
- # 6. Optional: Save output to file if save_response_to_file is set
802
+ # 5. Optional: Save output to file if save_response_to_file is set
808
803
  self.save_run_response_to_file(
809
804
  run_response=run_response, input=run_messages.user_message, session_id=session.session_id, user_id=user_id
810
805
  )
811
806
 
812
- # 7. Add the RunOutput to Agent Session
807
+ # 6. Add the RunOutput to Agent Session
813
808
  session.upsert_run(run=run_response)
814
809
 
810
+ # 7. Update Agent Memory
811
+ response_iterator = self._make_memories_and_summaries(
812
+ run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
813
+ )
814
+ # Consume the response iterator to ensure the memory is updated before the run is completed
815
+ deque(response_iterator, maxlen=0)
816
+
815
817
  # 8. Save session to memory
816
818
  self.save_session(session=session)
817
819
 
@@ -925,12 +927,7 @@ class Agent:
925
927
  )
926
928
  return
927
929
 
928
- # 3. Update Agent Memory
929
- yield from self._make_memories_and_summaries(
930
- run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
931
- )
932
-
933
- # 4. Calculate session metrics
930
+ # 3. Calculate session metrics
934
931
  self._update_session_metrics(session=session, run_response=run_response)
935
932
 
936
933
  run_response.status = RunStatus.completed
@@ -943,7 +940,7 @@ class Agent:
943
940
  if run_response.metrics:
944
941
  run_response.metrics.stop_timer()
945
942
 
946
- # 5. Optional: Save output to file if save_response_to_file is set
943
+ # 4. Optional: Save output to file if save_response_to_file is set
947
944
  self.save_run_response_to_file(
948
945
  run_response=run_response,
949
946
  input=run_messages.user_message,
@@ -951,9 +948,14 @@ class Agent:
951
948
  user_id=user_id,
952
949
  )
953
950
 
954
- # 6. Add RunOutput to Agent Session
951
+ # 5. Add RunOutput to Agent Session
955
952
  session.upsert_run(run=run_response)
956
953
 
954
+ # 6. Update Agent Memory
955
+ yield from self._make_memories_and_summaries(
956
+ run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
957
+ )
958
+
957
959
  # 7. Save session to storage
958
960
  self.save_session(session=session)
959
961
 
@@ -1100,7 +1102,6 @@ class Agent:
1100
1102
  # Resolve dependencies
1101
1103
  if run_dependencies is not None:
1102
1104
  self._resolve_run_dependencies(dependencies=run_dependencies)
1103
-
1104
1105
  add_dependencies = (
1105
1106
  add_dependencies_to_context if add_dependencies_to_context is not None else self.add_dependencies_to_context
1106
1107
  )
@@ -1197,7 +1198,7 @@ class Agent:
1197
1198
  files=files,
1198
1199
  knowledge_filters=effective_filters,
1199
1200
  add_history_to_context=add_history,
1200
- dependencies=dependencies,
1201
+ dependencies=run_dependencies,
1201
1202
  add_dependencies_to_context=add_dependencies,
1202
1203
  add_session_state_to_context=add_session_state,
1203
1204
  **kwargs,
@@ -1278,41 +1279,75 @@ class Agent:
1278
1279
  async def _arun(
1279
1280
  self,
1280
1281
  run_response: RunOutput,
1281
- run_messages: RunMessages,
1282
+ input: Union[str, List, Dict, Message, BaseModel, List[Message]],
1282
1283
  session: AgentSession,
1284
+ session_state: Optional[Dict[str, Any]] = None,
1283
1285
  user_id: Optional[str] = None,
1286
+ images: Optional[Sequence[Image]] = None,
1287
+ videos: Optional[Sequence[Video]] = None,
1288
+ audio: Optional[Sequence[Audio]] = None,
1289
+ files: Optional[Sequence[File]] = None,
1290
+ knowledge_filters: Optional[Dict[str, Any]] = None,
1291
+ add_history_to_context: Optional[bool] = None,
1292
+ add_dependencies_to_context: Optional[bool] = None,
1293
+ add_session_state_to_context: Optional[bool] = None,
1294
+ metadata: Optional[Dict[str, Any]] = None,
1284
1295
  response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
1285
1296
  dependencies: Optional[Dict[str, Any]] = None,
1297
+ **kwargs: Any,
1286
1298
  ) -> RunOutput:
1287
1299
  """Run the Agent and yield the RunOutput.
1288
1300
 
1289
1301
  Steps:
1290
- 1. Reason about the task if reasoning is enabled
1291
- 2. Generate a response from the Model (includes running function calls)
1292
- 3. Update the RunOutput with the model response
1293
- 4. Update Agent Memory
1294
- 5. Calculate session metrics
1295
- 6. Add RunOutput to Agent Session
1296
- 7. Save session to storage
1297
- 8. Optional: Save output to file if save_response_to_file is set
1302
+ 1. Resolve dependencies
1303
+ 2. Prepare run messages
1304
+ 3. Reason about the task if reasoning is enabled
1305
+ 4. Generate a response from the Model (includes running function calls)
1306
+ 5. Update the RunOutput with the model response
1307
+ 6. Update Agent Memory
1308
+ 7. Calculate session metrics
1309
+ 8. Add RunOutput to Agent Session
1310
+ 9. Save session to storage
1298
1311
  """
1299
- # Resolving here for async requirement
1312
+ # 1. Resolving here for async requirement
1300
1313
  if dependencies is not None:
1301
1314
  await self._aresolve_run_dependencies(dependencies)
1302
1315
 
1316
+ # 2. Prepare run messages
1317
+ run_messages: RunMessages = self._get_run_messages(
1318
+ run_response=run_response,
1319
+ input=input,
1320
+ session=session,
1321
+ session_state=session_state,
1322
+ user_id=user_id,
1323
+ audio=audio,
1324
+ images=images,
1325
+ videos=videos,
1326
+ files=files,
1327
+ knowledge_filters=knowledge_filters,
1328
+ add_history_to_context=add_history_to_context,
1329
+ dependencies=dependencies,
1330
+ add_dependencies_to_context=add_dependencies_to_context,
1331
+ add_session_state_to_context=add_session_state_to_context,
1332
+ metadata=metadata,
1333
+ **kwargs,
1334
+ )
1335
+ if len(run_messages.messages) == 0:
1336
+ log_error("No messages to be sent to the model.")
1337
+
1303
1338
  log_debug(f"Agent Run Start: {run_response.run_id}", center=True)
1304
1339
 
1305
1340
  # Register run for cancellation tracking
1306
1341
  register_run(run_response.run_id) # type: ignore
1307
1342
 
1308
1343
  self.model = cast(Model, self.model)
1309
- # 1. Reason about the task if reasoning is enabled
1344
+ # 3. Reason about the task if reasoning is enabled
1310
1345
  await self._ahandle_reasoning(run_response=run_response, run_messages=run_messages)
1311
1346
 
1312
1347
  # Check for cancellation before model call
1313
1348
  raise_if_cancelled(run_response.run_id) # type: ignore
1314
1349
 
1315
- # 2. Generate a response from the Model (includes running function calls)
1350
+ # 4. Generate a response from the Model (includes running function calls)
1316
1351
  model_response: ModelResponse = await self.model.aresponse(
1317
1352
  messages=run_messages.messages,
1318
1353
  tools=self._tools_for_model,
@@ -1331,7 +1366,7 @@ class Agent:
1331
1366
  # If a parser model is provided, structure the response separately
1332
1367
  await self._aparse_response_with_parser_model(model_response=model_response, run_messages=run_messages)
1333
1368
 
1334
- # 3. Update the RunOutput with the model response
1369
+ # 5. Update the RunOutput with the model response
1335
1370
  self._update_run_response(model_response=model_response, run_response=run_response, run_messages=run_messages)
1336
1371
 
1337
1372
  if self.store_media:
@@ -1345,12 +1380,6 @@ class Agent:
1345
1380
  run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
1346
1381
  )
1347
1382
 
1348
- # 5. Update Agent Memory
1349
- async for _ in self._amake_memories_and_summaries(
1350
- run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
1351
- ):
1352
- pass
1353
-
1354
1383
  # 6. Calculate session metrics
1355
1384
  self._update_session_metrics(session=session, run_response=run_response)
1356
1385
 
@@ -1363,7 +1392,7 @@ class Agent:
1363
1392
  if run_response.metrics:
1364
1393
  run_response.metrics.stop_timer()
1365
1394
 
1366
- # 6. Optional: Save output to file if save_response_to_file is set
1395
+ # Optional: Save output to file if save_response_to_file is set
1367
1396
  self.save_run_response_to_file(
1368
1397
  run_response=run_response, input=run_messages.user_message, session_id=session.session_id, user_id=user_id
1369
1398
  )
@@ -1371,7 +1400,13 @@ class Agent:
1371
1400
  # 7. Add RunOutput to Agent Session
1372
1401
  session.upsert_run(run=run_response)
1373
1402
 
1374
- # 8. Save session to storage
1403
+ # 8. Update Agent Memory
1404
+ async for _ in self._amake_memories_and_summaries(
1405
+ run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
1406
+ ):
1407
+ pass
1408
+
1409
+ # 9. Save session to storage
1375
1410
  self.save_session(session=session)
1376
1411
 
1377
1412
  # Log Agent Telemetry
@@ -1387,30 +1422,61 @@ class Agent:
1387
1422
  async def _arun_stream(
1388
1423
  self,
1389
1424
  run_response: RunOutput,
1390
- run_messages: RunMessages,
1391
1425
  session: AgentSession,
1426
+ input: Union[str, List, Dict, Message, BaseModel, List[Message]],
1427
+ session_state: Optional[Dict[str, Any]] = None,
1428
+ audio: Optional[Sequence[Audio]] = None,
1429
+ images: Optional[Sequence[Image]] = None,
1430
+ videos: Optional[Sequence[Video]] = None,
1431
+ files: Optional[Sequence[File]] = None,
1432
+ knowledge_filters: Optional[Dict[str, Any]] = None,
1433
+ add_history_to_context: Optional[bool] = None,
1434
+ add_dependencies_to_context: Optional[bool] = None,
1435
+ add_session_state_to_context: Optional[bool] = None,
1436
+ metadata: Optional[Dict[str, Any]] = None,
1437
+ dependencies: Optional[Dict[str, Any]] = None,
1392
1438
  user_id: Optional[str] = None,
1393
1439
  response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
1394
1440
  stream_intermediate_steps: bool = False,
1395
1441
  workflow_context: Optional[Dict] = None,
1396
1442
  yield_run_response: Optional[bool] = None,
1397
- dependencies: Optional[Dict[str, Any]] = None,
1443
+ **kwargs: Any,
1398
1444
  ) -> AsyncIterator[Union[RunOutputEvent, RunOutput]]:
1399
1445
  """Run the Agent and yield the RunOutput.
1400
1446
 
1401
1447
  Steps:
1402
- 1. Reason about the task if reasoning is enabled
1403
- 2. Generate a response from the Model (includes running function calls)
1404
- 3. Add the RunOutput to the Agent Session
1405
- 4. Update Agent Memory
1406
- 5. Calculate session metrics
1407
- 6. Add RunOutput to Agent Session
1408
- 7. Save session to storage
1448
+ 1. Resolve dependencies
1449
+ 2. Prepare run messages
1450
+ 3. Reason about the task if reasoning is enabled
1451
+ 4. Generate a response from the Model (includes running function calls)
1452
+ 5. Update Agent Memory
1453
+ 6. Calculate session metrics
1454
+ 7. Add RunOutput to Agent Session
1455
+ 8. Save session to storage
1409
1456
  """
1410
- run_dependencies = dependencies if dependencies is not None else self.dependencies
1411
- # Resolving here for async requirement
1412
- if run_dependencies is not None:
1413
- await self._aresolve_run_dependencies(dependencies=run_dependencies)
1457
+ # 1. Resolving here for async requirement
1458
+ if dependencies is not None:
1459
+ await self._aresolve_run_dependencies(dependencies=dependencies)
1460
+
1461
+ # 2. Prepare run messages
1462
+ run_messages: RunMessages = self._get_run_messages(
1463
+ run_response=run_response,
1464
+ input=input,
1465
+ session=session,
1466
+ session_state=session_state,
1467
+ user_id=user_id,
1468
+ audio=audio,
1469
+ images=images,
1470
+ videos=videos,
1471
+ files=files,
1472
+ knowledge_filters=knowledge_filters,
1473
+ add_history_to_context=add_history_to_context,
1474
+ dependencies=dependencies,
1475
+ add_dependencies_to_context=add_dependencies_to_context,
1476
+ add_session_state_to_context=add_session_state_to_context,
1477
+ metadata=metadata,
1478
+ **kwargs,
1479
+ )
1414
1480
 
1415
1481
  log_debug(f"Agent Run Start: {run_response.run_id}", center=True)
1416
1482
 
@@ -1422,7 +1488,7 @@ class Agent:
1422
1488
  if stream_intermediate_steps:
1423
1489
  yield self._handle_event(create_run_started_event(run_response), run_response, workflow_context)
1424
1490
 
1425
- # 1. Reason about the task if reasoning is enabled
1491
+ # 3. Reason about the task if reasoning is enabled
1426
1492
  async for item in self._ahandle_reasoning_stream(run_response=run_response, run_messages=run_messages):
1427
1493
  raise_if_cancelled(run_response.run_id) # type: ignore
1428
1494
  yield item
@@ -1430,7 +1496,7 @@ class Agent:
1430
1496
  # Check for cancellation before model processing
1431
1497
  raise_if_cancelled(run_response.run_id) # type: ignore
1432
1498
 
1433
- # 2. Generate a response from the Model
1499
+ # 4. Generate a response from the Model
1434
1500
  if self.output_model is None:
1435
1501
  async for event in self._ahandle_model_response_stream(
1436
1502
  session=session,
@@ -1494,13 +1560,7 @@ class Agent:
1494
1560
  yield item
1495
1561
  return
1496
1562
 
1497
- # 5. Update Agent Memory
1498
- async for event in self._amake_memories_and_summaries(
1499
- run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
1500
- ):
1501
- yield event
1502
-
1503
- # 6. Calculate session metrics
1563
+ # 5. Calculate session metrics
1504
1564
  self._update_session_metrics(session=session, run_response=run_response)
1505
1565
 
1506
1566
  run_response.status = RunStatus.completed
@@ -1513,7 +1573,7 @@ class Agent:
1513
1573
  if run_response.metrics:
1514
1574
  run_response.metrics.stop_timer()
1515
1575
 
1516
- # 8. Optional: Save output to file if save_response_to_file is set
1576
+ # Optional: Save output to file if save_response_to_file is set
1517
1577
  self.save_run_response_to_file(
1518
1578
  run_response=run_response,
1519
1579
  input=run_messages.user_message,
@@ -1524,7 +1584,13 @@ class Agent:
1524
1584
  # 6. Add RunOutput to Agent Session
1525
1585
  session.upsert_run(run=run_response)
1526
1586
 
1527
- # 7. Save session to storage
1587
+ # 7. Update Agent Memory
1588
+ async for event in self._amake_memories_and_summaries(
1589
+ run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
1590
+ ):
1591
+ yield event
1592
+
1593
+ # 8. Save session to storage
1528
1594
  self.save_session(session=session)
1529
1595
 
1530
1596
  if stream_intermediate_steps:
@@ -1666,10 +1732,6 @@ class Agent:
1666
1732
  # Determine run dependencies
1667
1733
  run_dependencies = dependencies if dependencies is not None else self.dependencies
1668
1734
 
1669
- # Resolve callable dependencies if present
1670
- if run_dependencies is not None:
1671
- self._resolve_run_dependencies(dependencies=run_dependencies)
1672
-
1673
1735
  add_dependencies = (
1674
1736
  add_dependencies_to_context if add_dependencies_to_context is not None else self.add_dependencies_to_context
1675
1737
  )
@@ -1750,51 +1812,52 @@ class Agent:
1750
1812
 
1751
1813
  for attempt in range(num_attempts):
1752
1814
  try:
1753
- # Prepare run messages
1754
- run_messages: RunMessages = self._get_run_messages(
1755
- run_response=run_response,
1756
- input=validated_input,
1757
- session=agent_session,
1758
- session_state=session_state,
1759
- user_id=user_id,
1760
- audio=audio,
1761
- images=images,
1762
- videos=videos,
1763
- files=files,
1764
- knowledge_filters=effective_filters,
1765
- add_history_to_context=add_history,
1766
- dependencies=dependencies,
1767
- add_dependencies_to_context=add_dependencies,
1768
- add_session_state_to_context=add_session_state,
1769
- metadata=metadata,
1770
- **kwargs,
1771
- )
1772
- if len(run_messages.messages) == 0:
1773
- log_error("No messages to be sent to the model.")
1774
-
1775
- run_messages = run_messages
1776
-
1777
1815
  # Pass the new run_response to _arun
1778
1816
  if stream:
1779
1817
  return self._arun_stream( # type: ignore
1780
1818
  run_response=run_response,
1781
- run_messages=run_messages,
1819
+ input=validated_input,
1782
1820
  user_id=user_id,
1783
1821
  session=agent_session,
1822
+ session_state=session_state,
1823
+ audio=audio,
1824
+ images=images,
1825
+ videos=videos,
1826
+ files=files,
1827
+ knowledge_filters=knowledge_filters,
1828
+ add_history_to_context=add_history,
1829
+ add_dependencies_to_context=add_dependencies,
1830
+ add_session_state_to_context=add_session_state,
1831
+ metadata=metadata,
1784
1832
  response_format=response_format,
1785
1833
  stream_intermediate_steps=stream_intermediate_steps,
1786
1834
  workflow_context=workflow_context,
1787
1835
  yield_run_response=yield_run_response,
1788
1836
  dependencies=run_dependencies,
1837
+ **kwargs,
1789
1838
  ) # type: ignore[assignment]
1790
1839
  else:
1791
1840
  return self._arun( # type: ignore
1792
1841
  run_response=run_response,
1793
- run_messages=run_messages,
1842
+ input=validated_input,
1794
1843
  user_id=user_id,
1795
1844
  session=agent_session,
1845
+ session_state=session_state,
1846
+ audio=audio,
1847
+ images=images,
1848
+ videos=videos,
1849
+ files=files,
1850
+ knowledge_filters=knowledge_filters,
1851
+ add_history_to_context=add_history,
1852
+ add_dependencies_to_context=add_dependencies,
1853
+ add_session_state_to_context=add_session_state,
1854
+ metadata=metadata,
1796
1855
  response_format=response_format,
1856
+ stream_intermediate_steps=stream_intermediate_steps,
1857
+ workflow_context=workflow_context,
1858
+ yield_run_response=yield_run_response,
1797
1859
  dependencies=run_dependencies,
1860
+ **kwargs,
1798
1861
  )
1799
1862
  except ModelProviderError as e:
1800
1863
  log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}")
@@ -2111,14 +2174,7 @@ class Agent:
2111
2174
  run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
2112
2175
  )
2113
2176
 
2114
- # 4. Update Agent Memory
2115
- response_iterator = self._make_memories_and_summaries(
2116
- run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
2117
- )
2118
- # Consume the response iterator to ensure the memory is updated before the run is completed
2119
- deque(response_iterator, maxlen=0)
2120
-
2121
- # 5. Calculate session metrics
2177
+ # 3. Calculate session metrics
2122
2178
  self._update_session_metrics(session=session, run_response=run_response)
2123
2179
 
2124
2180
  run_response.status = RunStatus.completed
@@ -2130,14 +2186,21 @@ class Agent:
2130
2186
  if run_response.metrics:
2131
2187
  run_response.metrics.stop_timer()
2132
2188
 
2133
- # 5. Save output to file if save_response_to_file is set
2189
+ # 4. Save output to file if save_response_to_file is set
2134
2190
  self.save_run_response_to_file(
2135
2191
  run_response=run_response, input=run_messages.user_message, session_id=session.session_id, user_id=user_id
2136
2192
  )
2137
2193
 
2138
- # 6. Add the run to memory
2194
+ # 5. Add the run to memory
2139
2195
  session.upsert_run(run=run_response)
2140
2196
 
2197
+ # 6. Update Agent Memory
2198
+ response_iterator = self._make_memories_and_summaries(
2199
+ run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
2200
+ )
2201
+ # Consume the response iterator to ensure the memory is updated before the run is completed
2202
+ deque(response_iterator, maxlen=0)
2203
+
2141
2204
  # 7. Save session to storage
2142
2205
  self.save_session(session=session)
2143
2206
 
@@ -2195,12 +2258,7 @@ class Agent:
2195
2258
  )
2196
2259
  return
2197
2260
 
2198
- # 4. Update Agent Memory
2199
- yield from self._make_memories_and_summaries(
2200
- run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
2201
- )
2202
-
2203
- # 5. Calculate session metrics
2261
+ # 3. Calculate session metrics
2204
2262
  self._update_session_metrics(session=session, run_response=run_response)
2205
2263
 
2206
2264
  run_response.status = RunStatus.completed
@@ -2211,14 +2269,19 @@ class Agent:
2211
2269
  if run_response.metrics:
2212
2270
  run_response.metrics.stop_timer()
2213
2271
 
2214
- # 5. Save output to file if save_response_to_file is set
2272
+ # 4. Save output to file if save_response_to_file is set
2215
2273
  self.save_run_response_to_file(
2216
2274
  run_response=run_response, input=run_messages.user_message, session_id=session.session_id, user_id=user_id
2217
2275
  )
2218
2276
 
2219
- # 6. Add the run to memory
2277
+ # 5. Add the run to memory
2220
2278
  session.upsert_run(run=run_response)
2221
2279
 
2280
+ # 6. Update Agent Memory
2281
+ yield from self._make_memories_and_summaries(
2282
+ run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
2283
+ )
2284
+
2222
2285
  # 7. Save session to storage
2223
2286
  self.save_session(session=session)
2224
2287
 
@@ -2497,13 +2560,7 @@ class Agent:
2497
2560
  run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
2498
2561
  )
2499
2562
 
2500
- # 4. Update Agent Memory
2501
- async for _ in self._amake_memories_and_summaries(
2502
- run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
2503
- ):
2504
- pass
2505
-
2506
- # 5. Calculate session metrics
2563
+ # 3. Calculate session metrics
2507
2564
  self._update_session_metrics(session=session, run_response=run_response)
2508
2565
 
2509
2566
  run_response.status = RunStatus.completed
@@ -2515,14 +2572,21 @@ class Agent:
2515
2572
  if run_response.metrics:
2516
2573
  run_response.metrics.stop_timer()
2517
2574
 
2518
- # 7. Save output to file if save_response_to_file is set
2575
+ # 4. Save output to file if save_response_to_file is set
2519
2576
  self.save_run_response_to_file(
2520
2577
  run_response=run_response, input=run_messages.user_message, session_id=session.session_id, user_id=user_id
2521
2578
  )
2522
2579
 
2580
+ # 5. Add the run to memory
2523
2581
  session.upsert_run(run=run_response)
2524
2582
 
2525
- # 6. Save session to storage
2583
+ # 6. Update Agent Memory
2584
+ async for _ in self._amake_memories_and_summaries(
2585
+ run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
2586
+ ):
2587
+ pass
2588
+
2589
+ # 7. Save session to storage
2526
2590
  self.save_session(session=session)
2527
2591
 
2528
2592
  # Log Agent Telemetry
@@ -2578,9 +2642,6 @@ class Agent:
2578
2642
  ):
2579
2643
  yield event
2580
2644
 
2581
- # 3. Add the run to memory
2582
- session.upsert_run(run=run_response)
2583
-
2584
2645
  # We should break out of the run function
2585
2646
  if any(tool_call.is_paused for tool_call in run_response.tools or []):
2586
2647
  for item in self._handle_agent_run_paused_stream(
@@ -2589,13 +2650,7 @@ class Agent:
2589
2650
  yield item
2590
2651
  return
2591
2652
 
2592
- # 4. Update Agent Memory
2593
- async for event in self._amake_memories_and_summaries(
2594
- run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
2595
- ):
2596
- yield event
2597
-
2598
- # 5. Calculate session metrics
2653
+ # 3. Calculate session metrics
2599
2654
  self._update_session_metrics(session=session, run_response=run_response)
2600
2655
 
2601
2656
  run_response.status = RunStatus.completed
@@ -2606,14 +2661,21 @@ class Agent:
2606
2661
  if run_response.metrics:
2607
2662
  run_response.metrics.stop_timer()
2608
2663
 
2609
- # 6. Save output to file if save_response_to_file is set
2664
+ # 4. Save output to file if save_response_to_file is set
2610
2665
  self.save_run_response_to_file(
2611
2666
  run_response=run_response, input=run_messages.user_message, session_id=session.session_id, user_id=user_id
2612
2667
  )
2613
2668
 
2669
+ # 5. Add the run to memory
2614
2670
  session.upsert_run(run=run_response)
2615
2671
 
2616
- # 6. Save session to storage
2672
+ # 6. Update Agent Memory
2673
+ async for event in self._amake_memories_and_summaries(
2674
+ run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
2675
+ ):
2676
+ yield event
2677
+
2678
+ # 7. Save session to storage
2617
2679
  self.save_session(session=session)
2618
2680
 
2619
2681
  if stream_intermediate_steps:
@@ -3270,13 +3332,6 @@ class Agent:
3270
3332
  ) + model_response_event.reasoning_content
3271
3333
  run_response.reasoning_content = model_response.reasoning_content
3272
3334
 
3273
- if model_response_event.reasoning_content is not None:
3274
- if not model_response.reasoning_content:
3275
- model_response.reasoning_content = model_response_event.reasoning_content
3276
- else:
3277
- model_response.reasoning_content += model_response_event.reasoning_content
3278
- run_response.reasoning_content = model_response.reasoning_content
3279
-
3280
3335
  if model_response_event.redacted_reasoning_content is not None:
3281
3336
  if not model_response.reasoning_content:
3282
3337
  model_response.reasoning_content = model_response_event.redacted_reasoning_content
@@ -4082,7 +4137,7 @@ class Agent:
4082
4137
  return json_response_format
4083
4138
 
4084
4139
  def _resolve_run_dependencies(self, dependencies: Dict[str, Any]) -> None:
4085
- from inspect import signature
4140
+ from inspect import iscoroutine, iscoroutinefunction, signature
4086
4141
 
4087
4142
  # Dependencies should already be resolved in run() method
4088
4143
  log_debug("Resolving dependencies")
@@ -4091,7 +4146,10 @@ class Agent:
4091
4146
  return
4092
4147
 
4093
4148
  for key, value in dependencies.items():
4094
- if callable(value):
4149
+ if iscoroutine(value) or iscoroutinefunction(value):
4150
+ log_warning(f"Dependency {key} is a coroutine. Use agent.arun() or agent.aprint_response() instead.")
4151
+ continue
4152
+ elif callable(value):
4095
4153
  try:
4096
4154
  sig = signature(value)
4097
4155
  result = value(agent=self) if "agent" in sig.parameters else value()
@@ -4103,7 +4161,7 @@ class Agent:
4103
4161
  dependencies[key] = value
4104
4162
 
4105
4163
  async def _aresolve_run_dependencies(self, dependencies: Dict[str, Any]) -> None:
4106
- from inspect import iscoroutine, signature
4164
+ from inspect import iscoroutine, iscoroutinefunction, signature
4107
4165
 
4108
4166
  log_debug("Resolving context (async)")
4109
4167
  if not isinstance(dependencies, dict):
@@ -4114,14 +4172,12 @@ class Agent:
4114
4172
  if not callable(value):
4115
4173
  dependencies[key] = value
4116
4174
  continue
4117
-
4118
4175
  try:
4119
4176
  sig = signature(value)
4120
4177
  result = value(agent=self) if "agent" in sig.parameters else value()
4121
4178
 
4122
- if iscoroutine(result):
4179
+ if iscoroutine(result) or iscoroutinefunction(result):
4123
4180
  result = await result
4124
-
4125
4181
  dependencies[key] = result
4126
4182
  except Exception as e:
4127
4183
  log_warning(f"Failed to resolve context for '{key}': {e}")
@@ -4851,7 +4907,7 @@ class Agent:
4851
4907
 
4852
4908
  # 3.3.15 Add the session state to the system message
4853
4909
  if self.add_session_state_to_context and session_state is not None:
4854
- system_message_content += f"\n<session_state>\n{session_state}\n</session_state>\n\n"
4910
+ system_message_content += self._get_formatted_session_state_for_system_message(session_state)
4855
4911
 
4856
4912
  # Return the system message
4857
4913
  return (
@@ -4860,6 +4916,9 @@ class Agent:
4860
4916
  else None
4861
4917
  )
4862
4918
 
4919
+ def _get_formatted_session_state_for_system_message(self, session_state: Dict[str, Any]) -> str:
4920
+ return f"\n<session_state>\n{session_state}\n</session_state>\n\n"
4921
+
4863
4922
  def _get_user_message(
4864
4923
  self,
4865
4924
  *,
@@ -5041,7 +5100,7 @@ class Agent:
5041
5100
  files: Optional[Sequence[File]] = None,
5042
5101
  knowledge_filters: Optional[Dict[str, Any]] = None,
5043
5102
  add_history_to_context: Optional[bool] = None,
5044
- run_dependencies: Optional[Dict[str, Any]] = None,
5103
+ dependencies: Optional[Dict[str, Any]] = None,
5045
5104
  add_dependencies_to_context: Optional[bool] = None,
5046
5105
  add_session_state_to_context: Optional[bool] = None,
5047
5106
  metadata: Optional[Dict[str, Any]] = None,
@@ -5079,7 +5138,7 @@ class Agent:
5079
5138
  session=session,
5080
5139
  session_state=session_state,
5081
5140
  user_id=user_id,
5082
- dependencies=run_dependencies,
5141
+ dependencies=dependencies,
5083
5142
  metadata=metadata,
5084
5143
  add_session_state_to_context=add_session_state_to_context,
5085
5144
  )
@@ -5160,7 +5219,7 @@ class Agent:
5160
5219
  videos=videos,
5161
5220
  files=files,
5162
5221
  knowledge_filters=knowledge_filters,
5163
- run_dependencies=run_dependencies,
5222
+ dependencies=dependencies,
5164
5223
  add_dependencies_to_context=add_dependencies_to_context,
5165
5224
  metadata=metadata,
5166
5225
  **kwargs,
@@ -5173,7 +5232,13 @@ class Agent:
5173
5232
  # 4.3 If input is provided as a dict, try to validate it as a Message
5174
5233
  elif isinstance(input, dict):
5175
5234
  try:
5176
- user_message = Message.model_validate(input)
5235
+ if self.input_schema and is_typed_dict(self.input_schema):
5236
+ import json
5237
+
5238
+ content = json.dumps(input, indent=2, ensure_ascii=False)
5239
+ user_message = Message(role=self.user_message_role, content=content)
5240
+ else:
5241
+ user_message = Message.model_validate(input)
5177
5242
  except Exception as e:
5178
5243
  log_warning(f"Failed to validate message: {e}")
5179
5244