agno 2.0.3__py3-none-any.whl → 2.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. agno/agent/agent.py +162 -86
  2. agno/db/dynamo/dynamo.py +8 -0
  3. agno/db/firestore/firestore.py +8 -1
  4. agno/db/gcs_json/gcs_json_db.py +9 -0
  5. agno/db/json/json_db.py +8 -0
  6. agno/db/mongo/mongo.py +10 -1
  7. agno/db/mysql/mysql.py +10 -0
  8. agno/db/postgres/postgres.py +16 -8
  9. agno/db/redis/redis.py +6 -0
  10. agno/db/singlestore/schemas.py +1 -1
  11. agno/db/singlestore/singlestore.py +8 -1
  12. agno/db/sqlite/sqlite.py +9 -1
  13. agno/db/utils.py +14 -0
  14. agno/knowledge/knowledge.py +91 -65
  15. agno/models/base.py +2 -2
  16. agno/models/openai/chat.py +3 -0
  17. agno/models/openai/responses.py +6 -0
  18. agno/models/response.py +5 -0
  19. agno/models/siliconflow/__init__.py +5 -0
  20. agno/models/siliconflow/siliconflow.py +25 -0
  21. agno/os/app.py +4 -1
  22. agno/os/auth.py +24 -14
  23. agno/os/router.py +128 -55
  24. agno/os/routers/evals/utils.py +9 -9
  25. agno/os/routers/health.py +26 -0
  26. agno/os/routers/knowledge/knowledge.py +11 -11
  27. agno/os/routers/session/session.py +24 -8
  28. agno/os/schema.py +8 -2
  29. agno/run/workflow.py +64 -10
  30. agno/session/team.py +1 -0
  31. agno/team/team.py +192 -92
  32. agno/tools/mem0.py +11 -17
  33. agno/tools/memory.py +34 -6
  34. agno/utils/common.py +90 -1
  35. agno/utils/streamlit.py +14 -8
  36. agno/vectordb/chroma/chromadb.py +8 -2
  37. agno/workflow/step.py +111 -13
  38. agno/workflow/workflow.py +16 -13
  39. {agno-2.0.3.dist-info → agno-2.0.4.dist-info}/METADATA +1 -1
  40. {agno-2.0.3.dist-info → agno-2.0.4.dist-info}/RECORD +43 -40
  41. {agno-2.0.3.dist-info → agno-2.0.4.dist-info}/WHEEL +0 -0
  42. {agno-2.0.3.dist-info → agno-2.0.4.dist-info}/licenses/LICENSE +0 -0
  43. {agno-2.0.3.dist-info → agno-2.0.4.dist-info}/top_level.txt +0 -0
agno/team/team.py CHANGED
@@ -55,6 +55,7 @@ from agno.run.team import TeamRunEvent, TeamRunInput, TeamRunOutput, TeamRunOutp
55
55
  from agno.session import SessionSummaryManager, TeamSession
56
56
  from agno.tools import Toolkit
57
57
  from agno.tools.function import Function
58
+ from agno.utils.common import is_typed_dict, validate_typed_dict
58
59
  from agno.utils.events import (
59
60
  create_team_memory_update_completed_event,
60
61
  create_team_memory_update_started_event,
@@ -621,8 +622,6 @@ class Team:
621
622
  if isinstance(input, BaseModel):
622
623
  if isinstance(input, self.input_schema):
623
624
  try:
624
- # Re-validate to catch any field validation errors
625
- input.model_validate(input.model_dump())
626
625
  return input
627
626
  except Exception as e:
628
627
  raise ValueError(f"BaseModel validation failed: {str(e)}")
@@ -633,8 +632,13 @@ class Team:
633
632
  # Case 2: Message is a dict
634
633
  elif isinstance(input, dict):
635
634
  try:
636
- validated_model = self.input_schema(**input)
637
- return validated_model
635
+ # Check if the schema is a TypedDict
636
+ if is_typed_dict(self.input_schema):
637
+ validated_dict = validate_typed_dict(input, self.input_schema)
638
+ return validated_dict
639
+ else:
640
+ validated_model = self.input_schema(**input)
641
+ return validated_model
638
642
  except Exception as e:
639
643
  raise ValueError(f"Failed to parse dict into {self.input_schema.__name__}: {str(e)}")
640
644
 
@@ -806,9 +810,9 @@ class Team:
806
810
  1. Reason about the task(s) if reasoning is enabled
807
811
  2. Get a response from the model
808
812
  3. Update Team Memory
809
- 5. Save session to storage
810
- 6. Parse any structured outputs
811
- 7. Log the team run
813
+ 4. Add RunOutput to Team Session
814
+ 5. Calculate session metrics
815
+ 6. Save session to storage
812
816
  """
813
817
  log_debug(f"Team Run Start: {run_response.run_id}", center=True)
814
818
 
@@ -849,10 +853,7 @@ class Team:
849
853
  else:
850
854
  self._scrub_media_from_run_output(run_response)
851
855
 
852
- # 4. Add the RunOutput to Team Session
853
- session.upsert_run(run_response=run_response)
854
-
855
- # 5. Update Team Memory
856
+ # 3. Update Team Memory
856
857
  response_iterator = self._make_memories_and_summaries(
857
858
  run_response=run_response,
858
859
  run_messages=run_messages,
@@ -861,14 +862,17 @@ class Team:
861
862
  )
862
863
  deque(response_iterator, maxlen=0)
863
864
 
864
- # 5. Calculate session metrics
865
- self._update_session_metrics(session=session)
866
-
867
865
  run_response.status = RunStatus.completed
868
866
 
869
- # 5. Parse team response model
867
+ # Parse team response model
870
868
  self._convert_response_to_structured_format(run_response=run_response)
871
869
 
870
+ # 4. Add the RunOutput to Team Session
871
+ session.upsert_run(run_response=run_response)
872
+
873
+ # 5. Calculate session metrics
874
+ self._update_session_metrics(session=session)
875
+
872
876
  # 6. Save session to storage
873
877
  self.save_session(session=session)
874
878
 
@@ -899,8 +903,9 @@ class Team:
899
903
  1. Reason about the task(s) if reasoning is enabled
900
904
  2. Get a response from the model
901
905
  3. Update Team Memory
902
- 4. Save session to storage
903
- 5. Log Team Run
906
+ 4. Add RunOutput to Team Session
907
+ 5. Calculate session metrics
908
+ 6. Save session to storage
904
909
  """
905
910
 
906
911
  log_debug(f"Team Run Start: {run_response.run_id}", center=True)
@@ -973,10 +978,7 @@ class Team:
973
978
  session=session, run_response=run_response, stream_intermediate_steps=stream_intermediate_steps
974
979
  )
975
980
 
976
- # 4. Add the run to memory
977
- session.upsert_run(run_response=run_response)
978
-
979
- # 5. Update Team Memory
981
+ # 3. Update Team Memory
980
982
  yield from self._make_memories_and_summaries(
981
983
  run_response=run_response,
982
984
  run_messages=run_messages,
@@ -984,11 +986,14 @@ class Team:
984
986
  user_id=user_id,
985
987
  )
986
988
 
989
+ run_response.status = RunStatus.completed
990
+
991
+ # 4. Add the run to memory
992
+ session.upsert_run(run_response=run_response)
993
+
987
994
  # 5. Calculate session metrics
988
995
  self._update_session_metrics(session=session)
989
996
 
990
- run_response.status = RunStatus.completed
991
-
992
997
  completed_event = self._handle_event(
993
998
  create_team_run_completed_event(
994
999
  from_run_response=run_response,
@@ -1338,35 +1343,72 @@ class Team:
1338
1343
  async def _arun(
1339
1344
  self,
1340
1345
  run_response: TeamRunOutput,
1341
- run_messages: RunMessages,
1346
+ input_message: Union[str, List, Dict, Message, BaseModel, List[Message]],
1342
1347
  session: TeamSession,
1348
+ session_state: Optional[Dict[str, Any]] = None,
1343
1349
  user_id: Optional[str] = None,
1350
+ images: Optional[Sequence[Image]] = None,
1351
+ videos: Optional[Sequence[Video]] = None,
1352
+ audio: Optional[Sequence[Audio]] = None,
1353
+ files: Optional[Sequence[File]] = None,
1354
+ knowledge_filters: Optional[Dict[str, Any]] = None,
1355
+ add_history_to_context: Optional[bool] = None,
1356
+ add_dependencies_to_context: Optional[bool] = None,
1357
+ add_session_state_to_context: Optional[bool] = None,
1358
+ metadata: Optional[Dict[str, Any]] = None,
1344
1359
  response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
1360
+ dependencies: Optional[Dict[str, Any]] = None,
1361
+ **kwargs: Any,
1345
1362
  ) -> TeamRunOutput:
1346
1363
  """Run the Team and return the response.
1347
1364
 
1348
1365
  Steps:
1349
- 1. Reason about the task(s) if reasoning is enabled
1350
- 2. Get a response from the model
1351
- 3. Update Team Memory
1352
- 5. Save session to storage
1353
- 6. Parse any structured outputs
1354
- 7. Log the team run
1366
+ 1. Resolve dependencies
1367
+ 2. Prepare run messages
1368
+ 3. Reason about the task(s) if reasoning is enabled
1369
+ 4. Get a response from the model
1370
+ 5. Update Team Memory
1371
+ 6. Add RunOutput to Team Session
1372
+ 7. Calculate session metrics
1373
+ 8. Save session to storage
1355
1374
  """
1356
- self.model = cast(Model, self.model)
1375
+ # 1. Resolve callable dependencies if present
1376
+ if dependencies is not None:
1377
+ await self._aresolve_run_dependencies(dependencies=dependencies)
1357
1378
 
1379
+ # 2. Prepare run messages
1380
+ run_messages = self._get_run_messages(
1381
+ run_response=run_response,
1382
+ session=session,
1383
+ session_state=session_state,
1384
+ user_id=user_id,
1385
+ input_message=input_message,
1386
+ audio=audio,
1387
+ images=images,
1388
+ videos=videos,
1389
+ files=files,
1390
+ knowledge_filters=knowledge_filters,
1391
+ add_history_to_context=add_history_to_context,
1392
+ dependencies=dependencies,
1393
+ add_dependencies_to_context=add_dependencies_to_context,
1394
+ add_session_state_to_context=add_session_state_to_context,
1395
+ metadata=metadata,
1396
+ **kwargs,
1397
+ )
1398
+
1399
+ self.model = cast(Model, self.model)
1358
1400
  log_debug(f"Team Run Start: {run_response.run_id}", center=True)
1359
1401
 
1360
1402
  # Register run for cancellation tracking
1361
1403
  register_run(run_response.run_id) # type: ignore
1362
1404
 
1363
- # 1. Reason about the task(s) if reasoning is enabled
1405
+ # 3. Reason about the task(s) if reasoning is enabled
1364
1406
  await self._ahandle_reasoning(run_response=run_response, run_messages=run_messages)
1365
1407
 
1366
1408
  # Check for cancellation before model call
1367
1409
  raise_if_cancelled(run_response.run_id) # type: ignore
1368
1410
 
1369
- # 2. Get the model response for the team leader
1411
+ # 4. Get the model response for the team leader
1370
1412
  model_response = await self.model.aresponse(
1371
1413
  messages=run_messages.messages,
1372
1414
  tools=self._tools_for_model,
@@ -1393,10 +1435,7 @@ class Team:
1393
1435
  else:
1394
1436
  self._scrub_media_from_run_output(run_response)
1395
1437
 
1396
- # 3. Add the run to memory
1397
- session.upsert_run(run_response=run_response)
1398
-
1399
- # 4. Update Team Memory
1438
+ # 5. Update Team Memory
1400
1439
  async for _ in self._amake_memories_and_summaries(
1401
1440
  run_response=run_response,
1402
1441
  session=session,
@@ -1405,18 +1444,21 @@ class Team:
1405
1444
  ):
1406
1445
  pass
1407
1446
 
1408
- # 5. Calculate session metrics
1409
- self._update_session_metrics(session=session)
1410
-
1411
1447
  run_response.status = RunStatus.completed
1412
1448
 
1413
- # 6. Parse team response model
1449
+ # Parse team response model
1414
1450
  self._convert_response_to_structured_format(run_response=run_response)
1415
1451
 
1416
- # 7. Save session to storage
1452
+ # 6. Add the run to memory
1453
+ session.upsert_run(run_response=run_response)
1454
+
1455
+ # 7. Calculate session metrics
1456
+ self._update_session_metrics(session=session)
1457
+
1458
+ # 8. Save session to storage
1417
1459
  self.save_session(session=session)
1418
1460
 
1419
- # 8. Log Team Telemetry
1461
+ # Log Team Telemetry
1420
1462
  await self._alog_team_telemetry(session_id=session.session_id, run_id=run_response.run_id)
1421
1463
 
1422
1464
  log_debug(f"Team Run End: {run_response.run_id}", center=True, symbol="*")
@@ -1429,25 +1471,64 @@ class Team:
1429
1471
  async def _arun_stream(
1430
1472
  self,
1431
1473
  run_response: TeamRunOutput,
1432
- run_messages: RunMessages,
1474
+ input_message: Union[str, List, Dict, Message, BaseModel, List[Message]],
1433
1475
  session: TeamSession,
1476
+ session_state: Optional[Dict[str, Any]] = None,
1434
1477
  user_id: Optional[str] = None,
1478
+ images: Optional[Sequence[Image]] = None,
1479
+ videos: Optional[Sequence[Video]] = None,
1480
+ audio: Optional[Sequence[Audio]] = None,
1481
+ files: Optional[Sequence[File]] = None,
1482
+ knowledge_filters: Optional[Dict[str, Any]] = None,
1483
+ add_history_to_context: Optional[bool] = None,
1484
+ add_dependencies_to_context: Optional[bool] = None,
1485
+ add_session_state_to_context: Optional[bool] = None,
1486
+ metadata: Optional[Dict[str, Any]] = None,
1435
1487
  response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
1488
+ dependencies: Optional[Dict[str, Any]] = None,
1436
1489
  stream_intermediate_steps: bool = False,
1437
1490
  workflow_context: Optional[Dict] = None,
1438
1491
  yield_run_response: bool = False,
1492
+ **kwargs: Any,
1439
1493
  ) -> AsyncIterator[Union[TeamRunOutputEvent, RunOutputEvent, TeamRunOutput]]:
1440
1494
  """Run the Team and return the response.
1441
1495
 
1442
1496
  Steps:
1443
- 1. Reason about the task(s) if reasoning is enabled
1444
- 2. Get a response from the model
1445
- 3. Update Team Memory
1446
- 4. Save session to storage
1447
- 5. Log Team Run
1497
+ 1. Resolve dependencies
1498
+ 2. Prepare run messages
1499
+ 3. Reason about the task(s) if reasoning is enabled
1500
+ 4. Get a response from the model
1501
+ 5. Update Team Memory
1502
+ 6. Add RunOutput to Team Session
1503
+ 7. Calculate session metrics
1504
+ 8. Save session to storage
1448
1505
  """
1449
- log_debug(f"Team Run Start: {run_response.run_id}", center=True)
1450
1506
 
1507
+ # 1. Resolve callable dependencies if present
1508
+ if dependencies is not None:
1509
+ await self._aresolve_run_dependencies(dependencies=dependencies)
1510
+
1511
+ # 2. Prepare run messages
1512
+ run_messages = self._get_run_messages(
1513
+ run_response=run_response,
1514
+ session=session,
1515
+ session_state=session_state,
1516
+ user_id=user_id,
1517
+ input_message=input_message,
1518
+ audio=audio,
1519
+ images=images,
1520
+ videos=videos,
1521
+ files=files,
1522
+ knowledge_filters=knowledge_filters,
1523
+ add_history_to_context=add_history_to_context,
1524
+ dependencies=dependencies,
1525
+ add_dependencies_to_context=add_dependencies_to_context,
1526
+ add_session_state_to_context=add_session_state_to_context,
1527
+ metadata=metadata,
1528
+ **kwargs,
1529
+ )
1530
+
1531
+ log_debug(f"Team Run Start: {run_response.run_id}", center=True)
1451
1532
  # Register run for cancellation tracking
1452
1533
  register_run(run_response.run_id) # type: ignore
1453
1534
 
@@ -1458,7 +1539,7 @@ class Team:
1458
1539
  create_team_run_started_event(from_run_response=run_response), run_response, workflow_context
1459
1540
  )
1460
1541
 
1461
- # 1. Reason about the task(s) if reasoning is enabled
1542
+ # 3. Reason about the task(s) if reasoning is enabled
1462
1543
  async for item in self._ahandle_reasoning_stream(run_response=run_response, run_messages=run_messages):
1463
1544
  raise_if_cancelled(run_response.run_id) # type: ignore
1464
1545
  yield item
@@ -1466,7 +1547,7 @@ class Team:
1466
1547
  # Check for cancellation before model processing
1467
1548
  raise_if_cancelled(run_response.run_id) # type: ignore
1468
1549
 
1469
- # 2. Get a response from the model
1550
+ # 4. Get a response from the model
1470
1551
  if self.output_model is None:
1471
1552
  async for event in self._ahandle_model_response_stream(
1472
1553
  session=session,
@@ -1518,10 +1599,7 @@ class Team:
1518
1599
  ):
1519
1600
  yield event
1520
1601
 
1521
- # 3. Add the run to memory
1522
- session.upsert_run(run_response=run_response)
1523
-
1524
- # 4. Update Team Memory
1602
+ # 6. Update Team Memory
1525
1603
  async for event in self._amake_memories_and_summaries(
1526
1604
  run_response=run_response,
1527
1605
  session=session,
@@ -1530,16 +1608,19 @@ class Team:
1530
1608
  ):
1531
1609
  yield event
1532
1610
 
1533
- # 5. Calculate session metrics
1534
- self._update_session_metrics(session=session)
1535
-
1536
1611
  run_response.status = RunStatus.completed
1537
1612
 
1613
+ # 7. Add the run to memory
1614
+ session.upsert_run(run_response=run_response)
1615
+
1616
+ # 8. Calculate session metrics
1617
+ self._update_session_metrics(session=session)
1618
+
1538
1619
  completed_event = self._handle_event(
1539
1620
  create_team_run_completed_event(from_run_response=run_response), run_response, workflow_context
1540
1621
  )
1541
1622
 
1542
- # 6. Save session to storage
1623
+ # 9. Save session to storage
1543
1624
  self.save_session(session=session)
1544
1625
 
1545
1626
  if stream_intermediate_steps:
@@ -1548,7 +1629,7 @@ class Team:
1548
1629
  if yield_run_response:
1549
1630
  yield run_response
1550
1631
 
1551
- # 7. Log Team Telemetry
1632
+ # Log Team Telemetry
1552
1633
  await self._alog_team_telemetry(session_id=session.session_id, run_id=run_response.run_id)
1553
1634
 
1554
1635
  log_debug(f"Team Run End: {run_response.run_id}", center=True, symbol="*")
@@ -1681,10 +1762,6 @@ class Team:
1681
1762
  # Determine run dependencies (runtime override takes priority)
1682
1763
  run_dependencies = dependencies if dependencies is not None else self.dependencies
1683
1764
 
1684
- # Resolve callable dependencies if present
1685
- if run_dependencies is not None:
1686
- self._resolve_run_dependencies(dependencies=run_dependencies)
1687
-
1688
1765
  # Determine runtime context parameters
1689
1766
  add_dependencies = (
1690
1767
  add_dependencies_to_context if add_dependencies_to_context is not None else self.add_dependencies_to_context
@@ -1781,43 +1858,49 @@ class Team:
1781
1858
  for attempt in range(num_attempts):
1782
1859
  # Run the team
1783
1860
  try:
1784
- run_messages = self._get_run_messages(
1785
- run_response=run_response,
1786
- session=team_session, # type: ignore
1787
- session_state=session_state,
1788
- user_id=user_id,
1789
- input_message=validated_input,
1790
- audio=audio,
1791
- images=images,
1792
- videos=videos,
1793
- files=files,
1794
- knowledge_filters=effective_filters,
1795
- add_history_to_context=add_history,
1796
- dependencies=run_dependencies,
1797
- add_dependencies_to_context=add_dependencies,
1798
- add_session_state_to_context=add_session_state,
1799
- **kwargs,
1800
- )
1801
-
1802
1861
  if stream:
1803
1862
  response_iterator = self._arun_stream(
1804
1863
  run_response=run_response,
1805
- run_messages=run_messages,
1864
+ input_message=validated_input,
1806
1865
  session=team_session, # type: ignore
1866
+ session_state=session_state,
1807
1867
  user_id=user_id,
1868
+ audio=audio,
1869
+ images=images,
1870
+ videos=videos,
1871
+ files=files,
1872
+ knowledge_filters=effective_filters,
1873
+ add_history_to_context=add_history,
1874
+ add_dependencies_to_context=add_dependencies,
1875
+ add_session_state_to_context=add_session_state,
1876
+ metadata=metadata,
1808
1877
  response_format=response_format,
1878
+ dependencies=run_dependencies,
1809
1879
  stream_intermediate_steps=stream_intermediate_steps,
1810
1880
  workflow_context=workflow_context,
1811
1881
  yield_run_response=yield_run_response,
1882
+ **kwargs,
1812
1883
  )
1813
1884
  return response_iterator # type: ignore
1814
1885
  else:
1815
1886
  return self._arun( # type: ignore
1816
1887
  run_response=run_response,
1817
- run_messages=run_messages,
1888
+ input_message=validated_input,
1818
1889
  session=team_session, # type: ignore
1819
1890
  user_id=user_id,
1891
+ session_state=session_state,
1892
+ audio=audio,
1893
+ images=images,
1894
+ videos=videos,
1895
+ files=files,
1896
+ knowledge_filters=effective_filters,
1897
+ add_history_to_context=add_history,
1898
+ add_dependencies_to_context=add_dependencies,
1899
+ add_session_state_to_context=add_session_state,
1900
+ metadata=metadata,
1820
1901
  response_format=response_format,
1902
+ dependencies=run_dependencies,
1903
+ **kwargs,
1821
1904
  )
1822
1905
 
1823
1906
  except ModelProviderError as e:
@@ -2284,11 +2367,13 @@ class Team:
2284
2367
  tc.tool_call_id: i for i, tc in enumerate(run_response.tools) if tc.tool_call_id is not None
2285
2368
  }
2286
2369
  # Process tool calls
2287
- for tool_call_dict in tool_executions_list:
2288
- tool_call_id = tool_call_dict.tool_call_id or ""
2370
+ for tool_execution in tool_executions_list:
2371
+ tool_call_id = tool_execution.tool_call_id or ""
2289
2372
  index = tool_call_index_map.get(tool_call_id)
2290
2373
  if index is not None:
2291
- run_response.tools[index] = tool_call_dict
2374
+ if run_response.tools[index].child_run_id is not None:
2375
+ tool_execution.child_run_id = run_response.tools[index].child_run_id
2376
+ run_response.tools[index] = tool_execution
2292
2377
  else:
2293
2378
  run_response.tools = tool_executions_list
2294
2379
 
@@ -3731,7 +3816,7 @@ class Team:
3731
3816
 
3732
3817
  try:
3733
3818
  sig = signature(value)
3734
- resolved_value = value(agent=self) if "agent" in sig.parameters else value()
3819
+ resolved_value = value(team=self) if "team" in sig.parameters else value()
3735
3820
 
3736
3821
  if iscoroutine(resolved_value):
3737
3822
  resolved_value = await resolved_value
@@ -4392,8 +4477,8 @@ class Team:
4392
4477
  f"<additional_context>\n{self.additional_context.strip()}\n</additional_context>\n\n"
4393
4478
  )
4394
4479
 
4395
- if self.add_session_state_to_context:
4396
- system_message_content += f"<session_state>\n{session_state}\n</session_state>\n\n"
4480
+ if self.add_session_state_to_context and session_state is not None:
4481
+ system_message_content += self._get_formatted_session_state_for_system_message(session_state)
4397
4482
 
4398
4483
  # Add the JSON output prompt if output_schema is provided and structured_outputs is False
4399
4484
  if (
@@ -4406,6 +4491,9 @@ class Team:
4406
4491
 
4407
4492
  return Message(role=self.system_message_role, content=system_message_content.strip())
4408
4493
 
4494
+ def _get_formatted_session_state_for_system_message(self, session_state: Dict[str, Any]) -> str:
4495
+ return f"\n<session_state>\n{session_state}\n</session_state>\n\n"
4496
+
4409
4497
  def _get_run_messages(
4410
4498
  self,
4411
4499
  *,
@@ -4597,7 +4685,13 @@ class Team:
4597
4685
  # If message is provided as a dict, try to validate it as a Message
4598
4686
  elif isinstance(input_message, dict):
4599
4687
  try:
4600
- return Message.model_validate(input_message)
4688
+ if self.input_schema and is_typed_dict(self.input_schema):
4689
+ import json
4690
+
4691
+ content = json.dumps(input_message, indent=2, ensure_ascii=False)
4692
+ return Message(role="user", content=content)
4693
+ else:
4694
+ return Message.model_validate(input_message)
4601
4695
  except Exception as e:
4602
4696
  log_warning(f"Failed to validate input: {e}")
4603
4697
 
@@ -5158,6 +5252,12 @@ class Team:
5158
5252
  if member_agent_run_response is not None:
5159
5253
  member_agent_run_response.parent_run_id = run_response.run_id # type: ignore
5160
5254
 
5255
+ # Update the top-level team run_response tool call to have the run_id of the member run
5256
+ if run_response.tools is not None:
5257
+ for tool in run_response.tools:
5258
+ if tool.tool_name and tool.tool_name.lower() == "delegate_task_to_member":
5259
+ tool.child_run_id = member_agent_run_response.run_id # type: ignore
5260
+
5161
5261
  # Update the team run context
5162
5262
  member_name = member_agent.name if member_agent.name else member_agent.id if member_agent.id else "Unknown"
5163
5263
  if isinstance(member_agent_task, str):
agno/tools/mem0.py CHANGED
@@ -2,7 +2,6 @@ import json
2
2
  from os import getenv
3
3
  from typing import Any, Dict, List, Optional, Union
4
4
 
5
- from agno.agent import Agent
6
5
  from agno.tools import Toolkit
7
6
  from agno.utils.log import log_debug, log_error, log_warning
8
7
 
@@ -69,15 +68,13 @@ class Mem0Tools(Toolkit):
69
68
  def _get_user_id(
70
69
  self,
71
70
  method_name: str,
72
- agent: Optional[Agent] = None,
71
+ session_state: Dict[str, Any],
73
72
  ) -> str:
74
73
  """Resolve the user ID"""
75
74
  resolved_user_id = self.user_id
76
- if not resolved_user_id and agent is not None:
75
+ if not resolved_user_id:
77
76
  try:
78
- session_state = getattr(agent, "session_state", None)
79
- if isinstance(session_state, dict):
80
- resolved_user_id = session_state.get("current_user_id")
77
+ resolved_user_id = session_state.get("current_user_id")
81
78
  except Exception:
82
79
  pass
83
80
  if not resolved_user_id:
@@ -88,7 +85,7 @@ class Mem0Tools(Toolkit):
88
85
 
89
86
  def add_memory(
90
87
  self,
91
- agent: Agent,
88
+ session_state,
92
89
  content: Union[str, Dict[str, str]],
93
90
  ) -> str:
94
91
  """Add facts to the user's memory.
@@ -101,7 +98,7 @@ class Mem0Tools(Toolkit):
101
98
  str: JSON-encoded Mem0 response or an error message.
102
99
  """
103
100
 
104
- resolved_user_id = self._get_user_id("add_memory", agent=agent)
101
+ resolved_user_id = self._get_user_id("add_memory", session_state=session_state)
105
102
  if isinstance(resolved_user_id, str) and resolved_user_id.startswith("Error in add_memory:"):
106
103
  return resolved_user_id
107
104
  try:
@@ -116,7 +113,6 @@ class Mem0Tools(Toolkit):
116
113
  messages_list,
117
114
  user_id=resolved_user_id,
118
115
  infer=self.infer,
119
- output_format="v1.1",
120
116
  )
121
117
  return json.dumps(result)
122
118
  except Exception as e:
@@ -125,19 +121,18 @@ class Mem0Tools(Toolkit):
125
121
 
126
122
  def search_memory(
127
123
  self,
128
- agent: Agent,
124
+ session_state: Dict[str, Any],
129
125
  query: str,
130
126
  ) -> str:
131
127
  """Semantic search for *query* across the user's stored memories."""
132
128
 
133
- resolved_user_id = self._get_user_id("search_memory", agent=agent)
129
+ resolved_user_id = self._get_user_id("search_memory", session_state=session_state)
134
130
  if isinstance(resolved_user_id, str) and resolved_user_id.startswith("Error in search_memory:"):
135
131
  return resolved_user_id
136
132
  try:
137
133
  results = self.client.search(
138
134
  query=query,
139
135
  user_id=resolved_user_id,
140
- output_format="v1.1",
141
136
  )
142
137
 
143
138
  if isinstance(results, dict) and "results" in results:
@@ -156,16 +151,15 @@ class Mem0Tools(Toolkit):
156
151
  log_error(f"Error searching memory: {e}")
157
152
  return f"Error searching memory: {e}"
158
153
 
159
- def get_all_memories(self, agent: Agent) -> str:
154
+ def get_all_memories(self, session_state: Dict[str, Any]) -> str:
160
155
  """Return **all** memories for the current user as a JSON string."""
161
156
 
162
- resolved_user_id = self._get_user_id("get_all_memories", agent=agent)
157
+ resolved_user_id = self._get_user_id("get_all_memories", session_state=session_state)
163
158
  if isinstance(resolved_user_id, str) and resolved_user_id.startswith("Error in get_all_memories:"):
164
159
  return resolved_user_id
165
160
  try:
166
161
  results = self.client.get_all(
167
162
  user_id=resolved_user_id,
168
- output_format="v1.1",
169
163
  )
170
164
 
171
165
  if isinstance(results, dict) and "results" in results:
@@ -183,10 +177,10 @@ class Mem0Tools(Toolkit):
183
177
  log_error(f"Error getting all memories: {e}")
184
178
  return f"Error getting all memories: {e}"
185
179
 
186
- def delete_all_memories(self, agent: Agent) -> str:
180
+ def delete_all_memories(self, session_state: Dict[str, Any]) -> str:
187
181
  """Delete *all* memories associated with the current user"""
188
182
 
189
- resolved_user_id = self._get_user_id("delete_all_memories", agent=agent)
183
+ resolved_user_id = self._get_user_id("delete_all_memories", session_state=session_state)
190
184
  if isinstance(resolved_user_id, str) and resolved_user_id.startswith("Error in delete_all_memories:"):
191
185
  error_msg = resolved_user_id
192
186
  log_error(error_msg)