agno 2.1.4__py3-none-any.whl → 2.1.6__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (95)
  1. agno/agent/agent.py +1775 -538
  2. agno/db/async_postgres/__init__.py +3 -0
  3. agno/db/async_postgres/async_postgres.py +1668 -0
  4. agno/db/async_postgres/schemas.py +124 -0
  5. agno/db/async_postgres/utils.py +289 -0
  6. agno/db/base.py +237 -2
  7. agno/db/dynamo/dynamo.py +2 -2
  8. agno/db/firestore/firestore.py +2 -2
  9. agno/db/firestore/utils.py +4 -2
  10. agno/db/gcs_json/gcs_json_db.py +2 -2
  11. agno/db/in_memory/in_memory_db.py +2 -2
  12. agno/db/json/json_db.py +2 -2
  13. agno/db/migrations/v1_to_v2.py +43 -13
  14. agno/db/mongo/mongo.py +14 -6
  15. agno/db/mongo/utils.py +0 -4
  16. agno/db/mysql/mysql.py +23 -13
  17. agno/db/postgres/postgres.py +17 -6
  18. agno/db/redis/redis.py +2 -2
  19. agno/db/singlestore/singlestore.py +19 -10
  20. agno/db/sqlite/sqlite.py +22 -12
  21. agno/db/sqlite/utils.py +8 -3
  22. agno/db/surrealdb/__init__.py +3 -0
  23. agno/db/surrealdb/metrics.py +292 -0
  24. agno/db/surrealdb/models.py +259 -0
  25. agno/db/surrealdb/queries.py +71 -0
  26. agno/db/surrealdb/surrealdb.py +1193 -0
  27. agno/db/surrealdb/utils.py +87 -0
  28. agno/eval/accuracy.py +50 -43
  29. agno/eval/performance.py +6 -3
  30. agno/eval/reliability.py +6 -3
  31. agno/eval/utils.py +33 -16
  32. agno/exceptions.py +8 -2
  33. agno/knowledge/knowledge.py +260 -46
  34. agno/knowledge/reader/pdf_reader.py +4 -6
  35. agno/knowledge/reader/reader_factory.py +2 -3
  36. agno/memory/manager.py +254 -46
  37. agno/models/anthropic/claude.py +37 -0
  38. agno/os/app.py +8 -7
  39. agno/os/interfaces/a2a/router.py +3 -5
  40. agno/os/interfaces/agui/router.py +4 -1
  41. agno/os/interfaces/agui/utils.py +27 -6
  42. agno/os/interfaces/slack/router.py +2 -4
  43. agno/os/mcp.py +98 -41
  44. agno/os/router.py +23 -0
  45. agno/os/routers/evals/evals.py +52 -20
  46. agno/os/routers/evals/utils.py +14 -14
  47. agno/os/routers/knowledge/knowledge.py +130 -9
  48. agno/os/routers/knowledge/schemas.py +57 -0
  49. agno/os/routers/memory/memory.py +116 -44
  50. agno/os/routers/metrics/metrics.py +16 -6
  51. agno/os/routers/session/session.py +65 -22
  52. agno/os/schema.py +36 -0
  53. agno/os/utils.py +64 -11
  54. agno/reasoning/anthropic.py +80 -0
  55. agno/reasoning/gemini.py +73 -0
  56. agno/reasoning/openai.py +5 -0
  57. agno/reasoning/vertexai.py +76 -0
  58. agno/session/workflow.py +3 -3
  59. agno/team/team.py +968 -179
  60. agno/tools/googlesheets.py +20 -5
  61. agno/tools/mcp_toolbox.py +3 -3
  62. agno/tools/scrapegraph.py +1 -1
  63. agno/utils/models/claude.py +3 -1
  64. agno/utils/streamlit.py +1 -1
  65. agno/vectordb/base.py +22 -1
  66. agno/vectordb/cassandra/cassandra.py +9 -0
  67. agno/vectordb/chroma/chromadb.py +26 -6
  68. agno/vectordb/clickhouse/clickhousedb.py +9 -1
  69. agno/vectordb/couchbase/couchbase.py +11 -0
  70. agno/vectordb/lancedb/lance_db.py +20 -0
  71. agno/vectordb/langchaindb/langchaindb.py +11 -0
  72. agno/vectordb/lightrag/lightrag.py +9 -0
  73. agno/vectordb/llamaindex/llamaindexdb.py +15 -1
  74. agno/vectordb/milvus/milvus.py +23 -0
  75. agno/vectordb/mongodb/mongodb.py +22 -0
  76. agno/vectordb/pgvector/pgvector.py +19 -0
  77. agno/vectordb/pineconedb/pineconedb.py +35 -4
  78. agno/vectordb/qdrant/qdrant.py +24 -0
  79. agno/vectordb/singlestore/singlestore.py +25 -17
  80. agno/vectordb/surrealdb/surrealdb.py +18 -2
  81. agno/vectordb/upstashdb/upstashdb.py +26 -1
  82. agno/vectordb/weaviate/weaviate.py +18 -0
  83. agno/workflow/condition.py +4 -0
  84. agno/workflow/loop.py +4 -0
  85. agno/workflow/parallel.py +4 -0
  86. agno/workflow/router.py +4 -0
  87. agno/workflow/step.py +30 -14
  88. agno/workflow/steps.py +4 -0
  89. agno/workflow/types.py +2 -2
  90. agno/workflow/workflow.py +328 -61
  91. {agno-2.1.4.dist-info → agno-2.1.6.dist-info}/METADATA +100 -41
  92. {agno-2.1.4.dist-info → agno-2.1.6.dist-info}/RECORD +95 -82
  93. {agno-2.1.4.dist-info → agno-2.1.6.dist-info}/WHEEL +0 -0
  94. {agno-2.1.4.dist-info → agno-2.1.6.dist-info}/licenses/LICENSE +0 -0
  95. {agno-2.1.4.dist-info → agno-2.1.6.dist-info}/top_level.txt +0 -0
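Before the per-file diff below, a quick orientation: the biggest team-facing change in this release is support for async database backends (the new agno/db/async_postgres package above, plus the AsyncBaseDb handling added throughout agno/team/team.py). A minimal sketch of how this might be wired up follows; the AsyncPostgresDb class name and its db_url keyword are assumptions inferred from the new module path, not confirmed API.

    import asyncio

    from agno.agent import Agent
    from agno.db.async_postgres import AsyncPostgresDb  # assumed export of the new async backend
    from agno.team import Team

    # Assumed constructor argument: an asyncpg-style connection string.
    db = AsyncPostgresDb(db_url="postgresql+asyncpg://ai:ai@localhost:5532/ai")

    team = Team(members=[Agent(name="researcher")], db=db)

    # run() raises when the team has an async DB (see the guard added in team.py below),
    # so the async entry point is used instead.
    asyncio.run(team.arun("What changed between 2.1.4 and 2.1.6?"))

With a synchronous BaseDb the existing run() path is unchanged; the async path mirrors it step for step, swapping in the async session read and asave_session().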
agno/team/team.py CHANGED
@@ -30,7 +30,7 @@ from uuid import uuid4
30
30
  from pydantic import BaseModel
31
31
 
32
32
  from agno.agent import Agent
33
- from agno.db.base import BaseDb, SessionType, UserMemory
33
+ from agno.db.base import AsyncBaseDb, BaseDb, SessionType, UserMemory
34
34
  from agno.exceptions import (
35
35
  InputCheckError,
36
36
  ModelProviderError,
@@ -214,7 +214,7 @@ class Team:
214
214
 
215
215
  # --- Database ---
216
216
  # Database to use for this agent
217
- db: Optional[BaseDb] = None
217
+ db: Optional[Union[BaseDb, AsyncBaseDb]] = None
218
218
 
219
219
  # Memory manager to use for this agent
220
220
  memory_manager: Optional[MemoryManager] = None
@@ -259,6 +259,10 @@ class Team:
259
259
  send_media_to_model: bool = True
260
260
  # If True, store media in run output
261
261
  store_media: bool = True
262
+ # If True, store tool results in run output
263
+ store_tool_messages: bool = True
264
+ # If True, store history messages in run output
265
+ store_history_messages: bool = True
262
266
 
263
267
  # --- Team Tools ---
264
268
  # A list of tools provided to the Model.
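The two new flags sit alongside the existing store_media switch and, as the scrubbing hunks further down show, control what is persisted with the run rather than what the model sees while it runs. A small usage sketch (the keyword arguments match the constructor signature added below; the member agent is illustrative):

    from agno.agent import Agent
    from agno.team import Team

    team = Team(
        members=[Agent(name="helper")],
        store_media=True,              # keep media artifacts in the stored run
        store_tool_messages=False,     # drop tool calls and tool results before the session is saved
        store_history_messages=False,  # drop messages replayed from history before the session is saved
    )

When any of these flags is False, the run output is scrubbed and re-upserted into the session just before it is saved (steps 13 and 14 of the new _arun() pipeline).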
@@ -418,6 +422,8 @@ class Team:
418
422
  search_knowledge: bool = True,
419
423
  read_team_history: bool = False,
420
424
  store_media: bool = True,
425
+ store_tool_messages: bool = True,
426
+ store_history_messages: bool = True,
421
427
  send_media_to_model: bool = True,
422
428
  tools: Optional[List[Union[Toolkit, Callable, Function, Dict]]] = None,
423
429
  tool_call_limit: Optional[int] = None,
@@ -433,7 +439,7 @@ class Team:
433
439
  output_model_prompt: Optional[str] = None,
434
440
  use_json_mode: bool = False,
435
441
  parse_response: bool = True,
436
- db: Optional[BaseDb] = None,
442
+ db: Optional[Union[BaseDb, AsyncBaseDb]] = None,
437
443
  enable_agentic_memory: bool = False,
438
444
  enable_user_memories: bool = False,
439
445
  add_memories_to_context: Optional[bool] = None,
@@ -518,6 +524,8 @@ class Team:
518
524
  self.read_team_history = read_team_history
519
525
 
520
526
  self.store_media = store_media
527
+ self.store_tool_messages = store_tool_messages
528
+ self.store_history_messages = store_history_messages
521
529
  self.send_media_to_model = send_media_to_model
522
530
 
523
531
  self.tools = tools
@@ -792,6 +800,10 @@ class Team:
792
800
 
793
801
  return session_id, user_id, session_state # type: ignore
794
802
 
803
+ def _has_async_db(self) -> bool:
804
+ """Return True if the db the team is equipped with is an Async implementation"""
805
+ return self.db is not None and isinstance(self.db, AsyncBaseDb)
806
+
795
807
  def initialize_team(self, debug_mode: Optional[bool] = None) -> None:
796
808
  # Make sure for the team, we are using the team logger
797
809
  use_team_logger()
@@ -1205,7 +1217,11 @@ class Team:
1205
1217
  )
1206
1218
  deque(response_iterator, maxlen=0)
1207
1219
 
1208
- # 10. Save session to storage
1220
+ # 10. Scrub the stored run based on storage flags
1221
+ if self._scrub_run_output_for_storage(run_response):
1222
+ session.upsert_run(run_response=run_response)
1223
+
1224
+ # 11. Save session to storage
1209
1225
  self.save_session(session=session)
1210
1226
 
1211
1227
  # Log Team Telemetry
@@ -1410,7 +1426,11 @@ class Team:
1410
1426
  # 8. Calculate session metrics
1411
1427
  self._update_session_metrics(session=session)
1412
1428
 
1413
- # 9. Save session to storage
1429
+ # 9. Scrub the stored run based on storage flags
1430
+ if self._scrub_run_output_for_storage(run_response):
1431
+ session.upsert_run(run_response=run_response)
1432
+
1433
+ # 10. Save session to storage
1414
1434
  self.save_session(session=session)
1415
1435
 
1416
1436
  if stream_intermediate_steps:
@@ -1519,6 +1539,8 @@ class Team:
1519
1539
  **kwargs: Any,
1520
1540
  ) -> Union[TeamRunOutput, Iterator[Union[RunOutputEvent, TeamRunOutputEvent]]]:
1521
1541
  """Run the Team and return the response."""
1542
+ if self._has_async_db():
1543
+ raise Exception("run() is not supported with an async DB. Please use arun() instead.")
1522
1544
 
1523
1545
  # Create a run_id for this specific run
1524
1546
  run_id = str(uuid4())
@@ -1737,45 +1759,65 @@ class Team:
1737
1759
  async def _arun(
1738
1760
  self,
1739
1761
  run_response: TeamRunOutput,
1740
- session: TeamSession,
1741
- session_state: Dict[str, Any],
1762
+ session_id: str,
1763
+ session_state: Optional[Dict[str, Any]] = None,
1742
1764
  user_id: Optional[str] = None,
1743
- knowledge_filters: Optional[Dict[str, Any]] = None,
1744
- add_history_to_context: Optional[bool] = None,
1765
+ response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
1745
1766
  add_dependencies_to_context: Optional[bool] = None,
1746
1767
  add_session_state_to_context: Optional[bool] = None,
1768
+ add_history_to_context: Optional[bool] = None,
1769
+ knowledge_filters: Optional[Dict[str, Any]] = None,
1747
1770
  metadata: Optional[Dict[str, Any]] = None,
1748
- response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
1749
- dependencies: Optional[Dict[str, Any]] = None,
1771
+ audio: Optional[Sequence[Audio]] = None,
1772
+ images: Optional[Sequence[Image]] = None,
1773
+ videos: Optional[Sequence[Video]] = None,
1774
+ files: Optional[Sequence[File]] = None,
1750
1775
  debug_mode: Optional[bool] = None,
1776
+ dependencies: Optional[Dict[str, Any]] = None,
1751
1777
  **kwargs: Any,
1752
1778
  ) -> TeamRunOutput:
1753
1779
  """Run the Team and return the response.
1754
1780
 
1755
1781
  Steps:
1756
- 1. Resolve dependencies
1757
- 2. Execute pre-hooks
1758
- 3. Prepare run messages
1759
- 4. Reason about the task(s) if reasoning is enabled
1760
- 5. Get a response from the model
1761
- 6. Add RunOutput to Team Session
1762
- 7. Calculate session metrics
1763
- 8. Update Team Memory
1764
- 9. Save session to storage
1782
+ 1. Read or create session
1783
+ 2. Update metadata and session state
1784
+ 3. Execute pre-hooks
1785
+ 4. Determine tools for model
1786
+ 5. Prepare run messages
1787
+ 6. Reason about the task if reasoning is enabled
1788
+ 7. Get a response from the Model (includes running function calls)
1789
+ 8. Update TeamRunOutput
1790
+ 9. Add the run to memory
1791
+ 10. Calculate session metrics
1792
+ 11. Parse team response model
1793
+ 12. Update Team Memory
1794
+ 13. Scrub the stored run if needed
1795
+ 14. Save session to storage
1796
+ 15. Execute post-hooks
1765
1797
  """
1766
- # 1. Resolve callable dependencies if present
1767
- if dependencies is not None:
1768
- await self._aresolve_run_dependencies(dependencies=dependencies)
1798
+ log_debug(f"Team Run Start: {run_response.run_id}", center=True)
1799
+
1800
+ register_run(run_response.run_id) # type: ignore
1801
+
1802
+ # 1. Read or create session. Reads from the database if provided.
1803
+ if self._has_async_db():
1804
+ team_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)
1805
+ else:
1806
+ team_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
1807
+
1808
+ # 2. Update metadata and session state
1809
+ self._update_metadata(session=team_session)
1810
+ session_state = self._load_session_state(session=team_session, session_state=session_state) # type: ignore
1769
1811
 
1770
1812
  run_input = cast(TeamRunInput, run_response.input)
1771
- self.model = cast(Model, self.model)
1772
- # 2. Execute pre-hooks after session is loaded but before processing starts
1813
+
1814
+ # 3. Execute pre-hooks after session is loaded but before processing starts
1773
1815
  if self.pre_hooks is not None:
1774
1816
  pre_hook_iterator = self._aexecute_pre_hooks(
1775
1817
  hooks=self.pre_hooks, # type: ignore
1776
1818
  run_response=run_response,
1777
1819
  run_input=run_input,
1778
- session=session,
1820
+ session=team_session,
1779
1821
  user_id=user_id,
1780
1822
  debug_mode=debug_mode,
1781
1823
  **kwargs,
@@ -1785,14 +1827,14 @@ class Team:
1785
1827
  async for _ in pre_hook_iterator:
1786
1828
  pass
1787
1829
 
1788
- # Initialize the team run context
1830
+ # 4. Determine tools for model
1789
1831
  team_run_context: Dict[str, Any] = {}
1790
-
1832
+ self.model = cast(Model, self.model)
1791
1833
  self.determine_tools_for_model(
1792
1834
  model=self.model,
1793
1835
  run_response=run_response,
1794
1836
  team_run_context=team_run_context,
1795
- session=session,
1837
+ session=team_session,
1796
1838
  session_state=session_state,
1797
1839
  user_id=user_id,
1798
1840
  async_mode=True,
@@ -1810,39 +1852,36 @@ class Team:
1810
1852
  metadata=metadata,
1811
1853
  )
1812
1854
 
1813
- # 3. Prepare run messages
1814
- run_messages = self._get_run_messages(
1855
+ # 5. Prepare run messages
1856
+ run_messages = await self._aget_run_messages(
1815
1857
  run_response=run_response,
1816
- session=session,
1858
+ session=team_session, # type: ignore
1817
1859
  session_state=session_state,
1818
1860
  user_id=user_id,
1819
1861
  input_message=run_input.input_content,
1820
- audio=run_input.audios,
1821
- images=run_input.images,
1822
- videos=run_input.videos,
1823
- files=run_input.files,
1862
+ audio=audio,
1863
+ images=images,
1864
+ videos=videos,
1865
+ files=files,
1824
1866
  knowledge_filters=knowledge_filters,
1825
1867
  add_history_to_context=add_history_to_context,
1826
1868
  dependencies=dependencies,
1827
1869
  add_dependencies_to_context=add_dependencies_to_context,
1828
1870
  add_session_state_to_context=add_session_state_to_context,
1829
- metadata=metadata,
1830
1871
  **kwargs,
1831
1872
  )
1832
1873
 
1833
- self.model = cast(Model, self.model)
1834
- log_debug(f"Team Run Start: {run_response.run_id}", center=True)
1835
-
1836
1874
  # Register run for cancellation tracking
1837
1875
  register_run(run_response.run_id) # type: ignore
1838
1876
 
1839
- # 4. Reason about the task(s) if reasoning is enabled
1877
+ # 6. Reason about the task(s) if reasoning is enabled
1840
1878
  await self._ahandle_reasoning(run_response=run_response, run_messages=run_messages)
1841
1879
 
1842
1880
  # Check for cancellation before model call
1843
1881
  raise_if_cancelled(run_response.run_id) # type: ignore
1844
1882
 
1845
- # 5. Get the model response for the team leader
1883
+ # 7. Get the model response for the team leader
1884
+ self.model = cast(Model, self.model)
1846
1885
  model_response = await self.model.aresponse(
1847
1886
  messages=run_messages.messages,
1848
1887
  tools=self._tools_for_model,
@@ -1852,60 +1891,65 @@ class Team:
1852
1891
  response_format=response_format,
1853
1892
  send_media_to_model=self.send_media_to_model,
1854
1893
  ) # type: ignore
1855
-
1856
- # Check for cancellation after model call
1857
1894
  raise_if_cancelled(run_response.run_id) # type: ignore
1858
1895
 
1859
1896
  # If an output model is provided, generate output using the output model
1860
1897
  await self._agenerate_response_with_output_model(model_response=model_response, run_messages=run_messages)
1861
-
1862
1898
  # If a parser model is provided, structure the response separately
1863
1899
  await self._aparse_response_with_parser_model(model_response=model_response, run_messages=run_messages)
1864
1900
 
1865
- # Update TeamRunOutput
1901
+ # 8. Update TeamRunOutput
1866
1902
  self._update_run_response(model_response=model_response, run_response=run_response, run_messages=run_messages)
1867
1903
 
1904
+ # Optional: Store media
1868
1905
  if self.store_media:
1869
1906
  self._store_media(run_response, model_response)
1870
1907
  else:
1871
1908
  self._scrub_media_from_run_output(run_response)
1872
1909
 
1910
+ # 9. Add the run to memory
1911
+ team_session.upsert_run(run_response=run_response)
1912
+
1913
+ # 10. Calculate session metrics
1914
+ self._update_session_metrics(session=team_session)
1915
+
1873
1916
  run_response.status = RunStatus.completed
1874
1917
 
1875
- # Parse team response model
1918
+ # 11. Parse team response model
1876
1919
  self._convert_response_to_structured_format(run_response=run_response)
1877
1920
 
1878
1921
  # Set the run duration
1879
1922
  if run_response.metrics:
1880
1923
  run_response.metrics.stop_timer()
1881
1924
 
1882
- # 6. Add the run to session
1883
- session.upsert_run(run_response=run_response)
1884
-
1885
- # 6. Update Team Memory
1925
+ # 12. Update Team Memory
1886
1926
  async for _ in self._amake_memories_and_summaries(
1887
1927
  run_response=run_response,
1888
- session=session,
1928
+ session=team_session,
1889
1929
  run_messages=run_messages,
1890
1930
  user_id=user_id,
1891
1931
  ):
1892
1932
  pass
1893
1933
 
1894
- # 7. Calculate session metrics
1895
- self._update_session_metrics(session=session)
1934
+ # 13. Scrub the stored run based on storage flags
1935
+ if self._scrub_run_output_for_storage(run_response):
1936
+ team_session.upsert_run(run_response=run_response)
1896
1937
 
1897
- # 8. Save session to storage
1898
- self.save_session(session=session)
1938
+ # 14. Save session to storage
1939
+ if self._has_async_db():
1940
+ await self.asave_session(session=team_session)
1941
+ else:
1942
+ self.save_session(session=team_session)
1899
1943
 
1900
- # Log Team Telemetry
1901
- await self._alog_team_telemetry(session_id=session.session_id, run_id=run_response.run_id)
1944
+ # Log Team Telemetry
1945
+ await self._alog_team_telemetry(session_id=team_session.session_id, run_id=run_response.run_id)
1902
1946
 
1903
- # Execute post-hooks after output is generated but before response is returned
1947
+ # 15. Execute post-hooks after output is generated but before response is returned
1904
1948
  if self.post_hooks is not None:
1905
1949
  await self._aexecute_post_hooks(
1906
1950
  hooks=self.post_hooks, # type: ignore
1907
1951
  run_output=run_response,
1908
- session=session,
1952
+ session=team_session,
1909
1953
  user_id=user_id,
1910
1954
  debug_mode=debug_mode,
1911
1955
  **kwargs,
@@ -1913,7 +1957,6 @@ class Team:
1913
1957
 
1914
1958
  log_debug(f"Team Run End: {run_response.run_id}", center=True, symbol="*")
1915
1959
 
1916
- # Always clean up the run tracking
1917
1960
  cleanup_run(run_response.run_id) # type: ignore
1918
1961
 
1919
1962
  return run_response
@@ -1921,40 +1964,60 @@ class Team:
1921
1964
  async def _arun_stream(
1922
1965
  self,
1923
1966
  run_response: TeamRunOutput,
1924
- session: TeamSession,
1925
- session_state: Dict[str, Any],
1967
+ session_id: str,
1968
+ session_state: Optional[Dict[str, Any]] = None,
1926
1969
  user_id: Optional[str] = None,
1927
- knowledge_filters: Optional[Dict[str, Any]] = None,
1928
- add_history_to_context: Optional[bool] = None,
1929
- add_dependencies_to_context: Optional[bool] = None,
1930
- add_session_state_to_context: Optional[bool] = None,
1931
- metadata: Optional[Dict[str, Any]] = None,
1932
1970
  response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
1933
- dependencies: Optional[Dict[str, Any]] = None,
1934
1971
  stream_intermediate_steps: bool = False,
1935
1972
  yield_run_response: bool = False,
1973
+ add_dependencies_to_context: Optional[bool] = None,
1974
+ add_session_state_to_context: Optional[bool] = None,
1975
+ add_history_to_context: Optional[bool] = None,
1976
+ knowledge_filters: Optional[Dict[str, Any]] = None,
1977
+ metadata: Optional[Dict[str, Any]] = None,
1978
+ audio: Optional[Sequence[Audio]] = None,
1979
+ images: Optional[Sequence[Image]] = None,
1980
+ videos: Optional[Sequence[Video]] = None,
1981
+ files: Optional[Sequence[File]] = None,
1936
1982
  debug_mode: Optional[bool] = None,
1983
+ dependencies: Optional[Dict[str, Any]] = None,
1937
1984
  **kwargs: Any,
1938
1985
  ) -> AsyncIterator[Union[TeamRunOutputEvent, RunOutputEvent, TeamRunOutput]]:
1939
1986
  """Run the Team and return the response.
1940
1987
 
1941
1988
  Steps:
1942
1989
  1. Resolve dependencies
1943
- 2. Prepare run messages
1944
- 3. Reason about the task(s) if reasoning is enabled
1945
- 4. Get a response from the model
1946
- 5. Add the run to Team Session
1947
- 6. Update Team Memory
1948
- 7. Create the run completed event
1949
- 8. Calculate session metrics
1950
- 9. Save session to storage
1990
+ 2. Read or create session
1991
+ 3. Update metadata and session state
1992
+ 4. Execute pre-hooks
1993
+ 5. Determine tools for model
1994
+ 6. Prepare run messages
1995
+ 7. Yield the run started event
1996
+ 8. Reason about the task(s) if reasoning is enabled
1997
+ 9. Get a response from the model
1998
+ 10. Add the run to memory
1999
+ 11. Update Team Memory
2000
+ 12. Calculate session metrics
2001
+ 13. Create the run completed event
2002
+ 14. Scrub the stored run if needed
2003
+ 15. Save session to storage
1951
2004
  """
1952
2005
 
1953
- # 1. Resolve callable dependencies if present
2006
+ # 1. Resolve dependencies
1954
2007
  if dependencies is not None:
1955
- await self._aresolve_run_dependencies(dependencies=dependencies)
2008
+ self._resolve_run_dependencies(dependencies=dependencies)
2009
+
2010
+ # 2. Read or create session. Reads from the database if provided.
2011
+ if self._has_async_db():
2012
+ team_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)
2013
+ else:
2014
+ team_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
2015
+
2016
+ # 3. Update metadata and session state
2017
+ self._update_metadata(session=team_session)
2018
+ session_state = self._load_session_state(session=team_session, session_state=session_state) # type: ignore
1956
2019
 
1957
- # Execute pre-hooks
2020
+ # 4. Execute pre-hooks
1958
2021
  run_input = cast(TeamRunInput, run_response.input)
1959
2022
  self.model = cast(Model, self.model)
1960
2023
  if self.pre_hooks is not None:
@@ -1962,7 +2025,7 @@ class Team:
1962
2025
  hooks=self.pre_hooks, # type: ignore
1963
2026
  run_response=run_response,
1964
2027
  run_input=run_input,
1965
- session=session,
2028
+ session=team_session,
1966
2029
  user_id=user_id,
1967
2030
  debug_mode=debug_mode,
1968
2031
  **kwargs,
@@ -1970,42 +2033,40 @@ class Team:
1970
2033
  async for pre_hook_event in pre_hook_iterator:
1971
2034
  yield pre_hook_event
1972
2035
 
1973
- # Initialize the team run context
2036
+ # 5. Determine tools for model
1974
2037
  team_run_context: Dict[str, Any] = {}
1975
-
2038
+ self.model = cast(Model, self.model)
1976
2039
  self.determine_tools_for_model(
1977
2040
  model=self.model,
1978
2041
  run_response=run_response,
1979
2042
  team_run_context=team_run_context,
1980
- session=session,
2043
+ session=team_session, # type: ignore
1981
2044
  session_state=session_state,
1982
2045
  user_id=user_id,
1983
2046
  async_mode=True,
1984
2047
  knowledge_filters=knowledge_filters,
1985
2048
  input_message=run_input.input_content,
1986
- images=run_input.images,
1987
- videos=run_input.videos,
1988
- audio=run_input.audios,
1989
- files=run_input.files,
2049
+ images=images,
2050
+ videos=videos,
2051
+ audio=audio,
2052
+ files=files,
1990
2053
  debug_mode=debug_mode,
1991
2054
  add_history_to_context=add_history_to_context,
1992
- add_dependencies_to_context=add_dependencies_to_context,
1993
- add_session_state_to_context=add_session_state_to_context,
1994
2055
  dependencies=dependencies,
1995
2056
  metadata=metadata,
1996
2057
  )
1997
2058
 
1998
- # 2. Prepare run messages
1999
- run_messages = self._get_run_messages(
2059
+ # 6. Prepare run messages
2060
+ run_messages = await self._aget_run_messages(
2000
2061
  run_response=run_response,
2001
- session=session,
2062
+ session=team_session, # type: ignore
2002
2063
  session_state=session_state,
2003
2064
  user_id=user_id,
2004
2065
  input_message=run_input.input_content,
2005
- audio=run_input.audios,
2006
- images=run_input.images,
2007
- videos=run_input.videos,
2008
- files=run_input.files,
2066
+ audio=audio,
2067
+ images=images,
2068
+ videos=videos,
2069
+ files=files,
2009
2070
  knowledge_filters=knowledge_filters,
2010
2071
  add_history_to_context=add_history_to_context,
2011
2072
  dependencies=dependencies,
@@ -2021,13 +2082,11 @@ class Team:
2021
2082
  register_run(run_response.run_id) # type: ignore
2022
2083
 
2023
2084
  try:
2024
- # Start the Run by yielding a RunStarted event
2085
+ # 7. Yield the run started event
2025
2086
  if stream_intermediate_steps:
2026
- yield self._handle_event(
2027
- create_team_run_started_event(from_run_response=run_response), run_response
2028
- )
2087
+ yield self._handle_event(create_team_run_started_event(from_run_response=run_response), run_response)
2029
2088
 
2030
- # 3. Reason about the task(s) if reasoning is enabled
2089
+ # 8. Reason about the task(s) if reasoning is enabled
2031
2090
  async for item in self._ahandle_reasoning_stream(run_response=run_response, run_messages=run_messages):
2032
2091
  raise_if_cancelled(run_response.run_id) # type: ignore
2033
2092
  yield item
@@ -2035,10 +2094,10 @@ class Team:
2035
2094
  # Check for cancellation before model processing
2036
2095
  raise_if_cancelled(run_response.run_id) # type: ignore
2037
2096
 
2038
- # 4. Get a response from the model
2097
+ # 9. Get a response from the model
2039
2098
  if self.output_model is None:
2040
2099
  async for event in self._ahandle_model_response_stream(
2041
- session=session,
2100
+ session=team_session,
2042
2101
  run_response=run_response,
2043
2102
  run_messages=run_messages,
2044
2103
  response_format=response_format,
@@ -2048,7 +2107,7 @@ class Team:
2048
2107
  yield event
2049
2108
  else:
2050
2109
  async for event in self._ahandle_model_response_stream(
2051
- session=session,
2110
+ session=team_session,
2052
2111
  run_response=run_response,
2053
2112
  run_messages=run_messages,
2054
2113
  response_format=response_format,
@@ -2067,7 +2126,7 @@ class Team:
2067
2126
  yield event
2068
2127
 
2069
2128
  async for event in self._agenerate_response_with_output_model_stream(
2070
- session=session,
2129
+ session=team_session,
2071
2130
  run_response=run_response,
2072
2131
  run_messages=run_messages,
2073
2132
  stream_intermediate_steps=stream_intermediate_steps,
@@ -2080,38 +2139,41 @@ class Team:
2080
2139
 
2081
2140
  # If a parser model is provided, structure the response separately
2082
2141
  async for event in self._aparse_response_with_parser_model_stream(
2083
- session=session, run_response=run_response, stream_intermediate_steps=stream_intermediate_steps
2142
+ session=team_session, run_response=run_response, stream_intermediate_steps=stream_intermediate_steps
2084
2143
  ):
2085
2144
  yield event
2086
2145
 
2087
2146
  run_response.status = RunStatus.completed
2088
2147
 
2089
- # Set the run duration
2090
- if run_response.metrics:
2091
- run_response.metrics.stop_timer()
2092
-
2093
- # 5. Add the run to Team Session
2094
- session.upsert_run(run_response=run_response)
2148
+ # 10. Add the run to memory
2149
+ team_session.upsert_run(run_response=run_response)
2095
2150
 
2096
- # 6. Update Team Memory
2151
+ # 11. Update Team Memory
2097
2152
  async for event in self._amake_memories_and_summaries(
2098
2153
  run_response=run_response,
2099
- session=session,
2154
+ session=team_session,
2100
2155
  run_messages=run_messages,
2101
2156
  user_id=user_id,
2102
2157
  ):
2103
2158
  yield event
2104
2159
 
2105
- # 7. Create the run completed event
2160
+ # 12. Calculate session metrics
2161
+ self._update_session_metrics(session=team_session)
2162
+
2163
+ # 13. Create the run completed event
2106
2164
  completed_event = self._handle_event(
2107
2165
  create_team_run_completed_event(from_run_response=run_response), run_response
2108
2166
  )
2109
2167
 
2110
- # 8. Calculate session metrics
2111
- self._update_session_metrics(session=session)
2168
+ # 14. Scrub the stored run based on storage flags
2169
+ if self._scrub_run_output_for_storage(run_response):
2170
+ team_session.upsert_run(run_response=run_response)
2112
2171
 
2113
- # 9. Save session to storage
2114
- self.save_session(session=session)
2172
+ # 15. Save the session to storage
2173
+ if self._has_async_db():
2174
+ await self.asave_session(session=team_session)
2175
+ else:
2176
+ self.save_session(session=team_session)
2115
2177
 
2116
2178
  if stream_intermediate_steps:
2117
2179
  yield completed_event
@@ -2120,7 +2182,7 @@ class Team:
2120
2182
  yield run_response
2121
2183
 
2122
2184
  # Log Team Telemetry
2123
- await self._alog_team_telemetry(session_id=session.session_id, run_id=run_response.run_id)
2185
+ await self._alog_team_telemetry(session_id=team_session.session_id, run_id=run_response.run_id)
2124
2186
 
2125
2187
  log_debug(f"Team Run End: {run_response.run_id}", center=True, symbol="*")
2126
2188
 
@@ -2137,8 +2199,11 @@ class Team:
2137
2199
  )
2138
2200
 
2139
2201
  # Add the RunOutput to Team Session even when cancelled
2140
- session.upsert_run(run_response=run_response)
2141
- self.save_session(session=session)
2202
+ team_session.upsert_run(run_response=run_response)
2203
+ if self._has_async_db():
2204
+ await self.asave_session(session=team_session)
2205
+ else:
2206
+ self.save_session(session=team_session)
2142
2207
  finally:
2143
2208
  # Always clean up the run tracking
2144
2209
  cleanup_run(run_response.run_id) # type: ignore
@@ -2245,25 +2310,8 @@ class Team:
2245
2310
  images=images, videos=videos, audios=audio, files=files
2246
2311
  )
2247
2312
 
2248
- # Create RunInput to capture the original user input
2249
- run_input = TeamRunInput(
2250
- input_content=validated_input,
2251
- images=image_artifacts,
2252
- videos=video_artifacts,
2253
- audios=audio_artifacts,
2254
- files=file_artifacts,
2255
- )
2256
-
2257
- team_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
2258
- self._update_metadata(session=team_session)
2259
-
2260
- # Update session state from DB
2261
- session_state = self._load_session_state(session=team_session, session_state=session_state)
2262
-
2263
- # Determine run dependencies (runtime override takes priority)
2313
+ # Resolve variables
2264
2314
  run_dependencies = dependencies if dependencies is not None else self.dependencies
2265
-
2266
- # Determine runtime context parameters
2267
2315
  add_dependencies = (
2268
2316
  add_dependencies_to_context if add_dependencies_to_context is not None else self.add_dependencies_to_context
2269
2317
  )
@@ -2274,10 +2322,14 @@ class Team:
2274
2322
  )
2275
2323
  add_history = add_history_to_context if add_history_to_context is not None else self.add_history_to_context
2276
2324
 
2277
- effective_filters = knowledge_filters
2278
- # When filters are passed manually
2279
- if self.knowledge_filters or knowledge_filters:
2280
- effective_filters = self._get_effective_filters(knowledge_filters)
2325
+ # Create RunInput to capture the original user input
2326
+ run_input = TeamRunInput(
2327
+ input_content=validated_input,
2328
+ images=image_artifacts,
2329
+ videos=video_artifacts,
2330
+ audios=audio_artifacts,
2331
+ files=files,
2332
+ )
2281
2333
 
2282
2334
  # Use stream override value when necessary
2283
2335
  if stream is None:
@@ -2308,6 +2360,11 @@ class Team:
2308
2360
  else:
2309
2361
  metadata = self.metadata
2310
2362
 
2363
+ # Get knowledge filters
2364
+ effective_filters = knowledge_filters
2365
+ if self.knowledge_filters or knowledge_filters:
2366
+ effective_filters = self._get_effective_filters(knowledge_filters)
2367
+
2311
2368
  # Create a new run_response for this attempt
2312
2369
  run_response = TeamRunOutput(
2313
2370
  run_id=run_id,
@@ -2337,8 +2394,9 @@ class Team:
2337
2394
  try:
2338
2395
  if stream:
2339
2396
  response_iterator = self._arun_stream(
2397
+ input=validated_input,
2340
2398
  run_response=run_response,
2341
- session=team_session, # type: ignore
2399
+ session_id=session_id,
2342
2400
  session_state=session_state,
2343
2401
  user_id=user_id,
2344
2402
  knowledge_filters=effective_filters,
@@ -2356,18 +2414,23 @@ class Team:
2356
2414
  return response_iterator # type: ignore
2357
2415
  else:
2358
2416
  return self._arun( # type: ignore
2417
+ input=validated_input,
2359
2418
  run_response=run_response,
2360
- session=team_session, # type: ignore
2361
- user_id=user_id,
2419
+ session_id=session_id,
2362
2420
  session_state=session_state,
2421
+ user_id=user_id,
2422
+ audio=audio,
2423
+ images=images,
2424
+ videos=videos,
2425
+ files=files,
2363
2426
  knowledge_filters=effective_filters,
2364
2427
  add_history_to_context=add_history,
2365
2428
  add_dependencies_to_context=add_dependencies,
2366
2429
  add_session_state_to_context=add_session_state,
2367
2430
  metadata=metadata,
2368
2431
  response_format=response_format,
2369
- dependencies=run_dependencies,
2370
2432
  debug_mode=debug_mode,
2433
+ dependencies=run_dependencies,
2371
2434
  **kwargs,
2372
2435
  )
2373
2436
 
@@ -2385,17 +2448,6 @@ class Team:
2385
2448
  import time
2386
2449
 
2387
2450
  time.sleep(delay)
2388
- except RunCancelledException as e:
2389
- # Handle run cancellation
2390
- log_info(f"Team run {run_response.run_id} was cancelled")
2391
- run_response.content = str(e)
2392
- run_response.status = RunStatus.cancelled
2393
-
2394
- # Add the RunOutput to Team Session even when cancelled
2395
- team_session.upsert_run(run_response=run_response)
2396
- self.save_session(session=team_session)
2397
-
2398
- return run_response
2399
2451
  except KeyboardInterrupt:
2400
2452
  run_response.content = "Operation cancelled by user"
2401
2453
  run_response.status = RunStatus.cancelled
@@ -3025,7 +3077,9 @@ class Team:
3025
3077
  run_messages.user_message.get_content_string() if run_messages.user_message is not None else None
3026
3078
  )
3027
3079
  if user_message_str is not None and self.memory_manager is not None and not self.enable_agentic_memory:
3028
- tasks.append(self.memory_manager.acreate_user_memories(message=user_message_str, user_id=user_id))
3080
+ tasks.append(
3081
+ self.memory_manager.acreate_user_memories(message=user_message_str, user_id=user_id, team_id=self.id)
3082
+ )
3029
3083
 
3030
3084
  if self.session_summary_manager is not None:
3031
3085
  tasks.append(self.session_summary_manager.acreate_session_summary(session=session))
@@ -3395,6 +3449,11 @@ class Team:
3395
3449
  tags_to_include_in_markdown: Optional[Set[str]] = None,
3396
3450
  **kwargs: Any,
3397
3451
  ) -> None:
3452
+ if self._has_async_db():
3453
+ raise Exception(
3454
+ "This method is not supported with an async DB. Please use the async version of this method."
3455
+ )
3456
+
3398
3457
  if not tags_to_include_in_markdown:
3399
3458
  tags_to_include_in_markdown = {"think", "thinking"}
3400
3459
 
@@ -3583,8 +3642,6 @@ class Team:
3583
3642
  run_response.input.audios = []
3584
3643
  run_response.input.files = []
3585
3644
 
3586
- # 2. RunOutput artifact media are skipped since we don't store them when store_media=False
3587
-
3588
3645
  # 3. Scrub media from all messages
3589
3646
  if run_response.messages:
3590
3647
  for message in run_response.messages:
@@ -3613,6 +3670,100 @@ class Team:
3613
3670
  message.image_output = None
3614
3671
  message.video_output = None
3615
3672
 
3673
+ def _scrub_tool_results_from_run_output(self, run_response: TeamRunOutput) -> None:
3674
+ """
3675
+ Remove all tool-related data from RunOutput when store_tool_messages=False.
3676
+ This removes both the tool call and its corresponding result to maintain API consistency.
3677
+ """
3678
+ if not run_response.messages:
3679
+ return
3680
+
3681
+ # Step 1: Collect all tool_call_ids from tool result messages
3682
+ tool_call_ids_to_remove = set()
3683
+ for message in run_response.messages:
3684
+ if message.role == "tool" and message.tool_call_id:
3685
+ tool_call_ids_to_remove.add(message.tool_call_id)
3686
+
3687
+ # Step 2: Remove tool result messages (role="tool")
3688
+ run_response.messages = [msg for msg in run_response.messages if msg.role != "tool"]
3689
+
3690
+ # Step 3: Remove assistant messages that made those tool calls
3691
+ filtered_messages = []
3692
+ for message in run_response.messages:
3693
+ # Check if this assistant message made any of the tool calls we're removing
3694
+ should_remove = False
3695
+ if message.role == "assistant" and message.tool_calls:
3696
+ for tool_call in message.tool_calls:
3697
+ if tool_call.get("id") in tool_call_ids_to_remove:
3698
+ should_remove = True
3699
+ break
3700
+
3701
+ if not should_remove:
3702
+ filtered_messages.append(message)
3703
+
3704
+ run_response.messages = filtered_messages
3705
+
3706
+ def _scrub_history_messages_from_run_output(self, run_response: TeamRunOutput) -> None:
3707
+ """
3708
+ Remove all history messages from TeamRunOutput when store_history_messages=False.
3709
+ This removes messages that were loaded from the team's memory.
3710
+ """
3711
+ # Remove messages with from_history=True
3712
+ if run_response.messages:
3713
+ run_response.messages = [msg for msg in run_response.messages if not msg.from_history]
3714
+
3715
+ def _scrub_run_output_for_storage(self, run_response: TeamRunOutput) -> bool:
3716
+ """
3717
+ Scrub run output based on storage flags before persisting to database.
3718
+ Returns True if any scrubbing was done, False otherwise.
3719
+ """
3720
+ scrubbed = False
3721
+
3722
+ if not self.store_media:
3723
+ self._scrub_media_from_run_output(run_response)
3724
+ scrubbed = True
3725
+
3726
+ if not self.store_tool_messages:
3727
+ self._scrub_tool_results_from_run_output(run_response)
3728
+ scrubbed = True
3729
+
3730
+ if not self.store_history_messages:
3731
+ self._scrub_history_messages_from_run_output(run_response)
3732
+ scrubbed = True
3733
+
3734
+ return scrubbed
3735
+
3736
+ def _scrub_member_responses(self, member_responses: List[Union[TeamRunOutput, RunOutput]]) -> None:
3737
+ """
3738
+ Scrub member responses based on each member's storage flags.
3739
+ This is called when saving the team session to ensure member data is scrubbed per member settings.
3740
+ Recursively handles nested team's member responses.
3741
+ """
3742
+ for member_response in member_responses:
3743
+ member_id = None
3744
+ if isinstance(member_response, RunOutput):
3745
+ member_id = member_response.agent_id
3746
+ elif isinstance(member_response, TeamRunOutput):
3747
+ member_id = member_response.team_id
3748
+
3749
+ if not member_id:
3750
+ log_info("Skipping member response with no ID")
3751
+ continue
3752
+
3753
+ member_result = self._find_member_by_id(member_id)
3754
+ if not member_result:
3755
+ log_debug(f"Could not find member with ID: {member_id}")
3756
+ continue
3757
+
3758
+ _, member = member_result
3759
+
3760
+ if not member.store_media or not member.store_tool_messages or not member.store_history_messages:
3761
+ member._scrub_run_output_for_storage(member_response) # type: ignore
3762
+
3763
+ # If this is a nested team, recursively scrub its member responses
3764
+ if isinstance(member_response, TeamRunOutput) and member_response.member_responses:
3765
+ member._scrub_member_responses(member_response.member_responses) # type: ignore
3766
+
3616
3767
  def _validate_media_object_id(
3617
3768
  self,
3618
3769
  images: Optional[Sequence[Image]] = None,
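The tool-message scrub above removes both halves of each tool exchange: the role="tool" result messages and the assistant messages whose tool_calls reference the same ids, so the stored transcript never contains a dangling call or an orphaned result. A standalone sketch of the same pairing logic, using plain dicts rather than agno Message objects:

    from typing import Any, Dict, List

    def scrub_tool_messages(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        # Ids of every tool result present in the transcript
        ids = {m["tool_call_id"] for m in messages if m.get("role") == "tool" and m.get("tool_call_id")}
        kept = []
        for m in messages:
            if m.get("role") == "tool":
                continue  # drop the tool result itself
            if m.get("role") == "assistant" and any(c.get("id") in ids for c in m.get("tool_calls") or []):
                continue  # drop the assistant message that issued the call
            kept.append(m)
        return kept

    transcript = [
        {"role": "user", "content": "What is the weather in Paris?"},
        {"role": "assistant", "tool_calls": [{"id": "call_1"}]},
        {"role": "tool", "tool_call_id": "call_1", "content": "18C and sunny"},
        {"role": "assistant", "content": "It is 18C and sunny in Paris."},
    ]
    print(scrub_tool_messages(transcript))  # only the user turn and the final answer remain

The history scrub is simpler: it keeps only messages whose from_history flag is not set, and _scrub_member_responses() applies the same per-member flags recursively to nested team outputs.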
@@ -3856,12 +4007,15 @@ class Team:
3856
4007
 
3857
4008
  # If a reasoning model is provided, use it to generate reasoning
3858
4009
  if reasoning_model_provided:
4010
+ from agno.reasoning.anthropic import is_anthropic_reasoning_model
3859
4011
  from agno.reasoning.azure_ai_foundry import is_ai_foundry_reasoning_model
3860
4012
  from agno.reasoning.deepseek import is_deepseek_reasoning_model
4013
+ from agno.reasoning.gemini import is_gemini_reasoning_model
3861
4014
  from agno.reasoning.groq import is_groq_reasoning_model
3862
4015
  from agno.reasoning.helpers import get_reasoning_agent
3863
4016
  from agno.reasoning.ollama import is_ollama_reasoning_model
3864
4017
  from agno.reasoning.openai import is_openai_reasoning_model
4018
+ from agno.reasoning.vertexai import is_vertexai_reasoning_model
3865
4019
 
3866
4020
  reasoning_agent = self.reasoning_agent or get_reasoning_agent(
3867
4021
  reasoning_model=reasoning_model,
@@ -3874,8 +4028,20 @@ class Team:
3874
4028
  is_openai = is_openai_reasoning_model(reasoning_model)
3875
4029
  is_ollama = is_ollama_reasoning_model(reasoning_model)
3876
4030
  is_ai_foundry = is_ai_foundry_reasoning_model(reasoning_model)
4031
+ is_gemini = is_gemini_reasoning_model(reasoning_model)
4032
+ is_anthropic = is_anthropic_reasoning_model(reasoning_model)
4033
+ is_vertexai = is_vertexai_reasoning_model(reasoning_model)
3877
4034
 
3878
- if is_deepseek or is_groq or is_openai or is_ollama or is_ai_foundry:
4035
+ if (
4036
+ is_deepseek
4037
+ or is_groq
4038
+ or is_openai
4039
+ or is_ollama
4040
+ or is_ai_foundry
4041
+ or is_gemini
4042
+ or is_anthropic
4043
+ or is_vertexai
4044
+ ):
3879
4045
  reasoning_message: Optional[Message] = None
3880
4046
  if is_deepseek:
3881
4047
  from agno.reasoning.deepseek import get_deepseek_reasoning
@@ -3912,6 +4078,27 @@ class Team:
3912
4078
  reasoning_message = get_ai_foundry_reasoning(
3913
4079
  reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
3914
4080
  )
4081
+ elif is_gemini:
4082
+ from agno.reasoning.gemini import get_gemini_reasoning
4083
+
4084
+ log_debug("Starting Gemini Reasoning", center=True, symbol="=")
4085
+ reasoning_message = get_gemini_reasoning(
4086
+ reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
4087
+ )
4088
+ elif is_anthropic:
4089
+ from agno.reasoning.anthropic import get_anthropic_reasoning
4090
+
4091
+ log_debug("Starting Anthropic Claude Reasoning", center=True, symbol="=")
4092
+ reasoning_message = get_anthropic_reasoning(
4093
+ reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
4094
+ )
4095
+ elif is_vertexai:
4096
+ from agno.reasoning.vertexai import get_vertexai_reasoning
4097
+
4098
+ log_debug("Starting VertexAI Reasoning", center=True, symbol="=")
4099
+ reasoning_message = get_vertexai_reasoning(
4100
+ reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
4101
+ )
3915
4102
 
3916
4103
  if reasoning_message is None:
3917
4104
  log_warning("Reasoning error. Reasoning response is None, continuing regular session...")
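With the checks added above, Anthropic, Gemini and VertexAI models can now drive the dedicated reasoning step for a team, alongside the existing DeepSeek, Groq, OpenAI, Ollama and Azure AI Foundry support. A hedged sketch of the configuration this enables (the model id is a placeholder, and using the same model for both roles is purely illustrative):

    from agno.agent import Agent
    from agno.models.anthropic import Claude
    from agno.team import Team

    team = Team(
        members=[Agent(name="analyst")],
        model=Claude(id="claude-sonnet-4-0"),            # placeholder model id
        reasoning_model=Claude(id="claude-sonnet-4-0"),  # routed through get_anthropic_reasoning() above
    )
    team.print_response("Compare the async and sync storage paths", stream=True)

The async run path gets the same treatment a few hunks below via aget_anthropic_reasoning(), aget_gemini_reasoning() and aget_vertexai_reasoning().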
@@ -4090,12 +4277,15 @@ class Team:
4090
4277
 
4091
4278
  # If a reasoning model is provided, use it to generate reasoning
4092
4279
  if reasoning_model_provided:
4280
+ from agno.reasoning.anthropic import is_anthropic_reasoning_model
4093
4281
  from agno.reasoning.azure_ai_foundry import is_ai_foundry_reasoning_model
4094
4282
  from agno.reasoning.deepseek import is_deepseek_reasoning_model
4283
+ from agno.reasoning.gemini import is_gemini_reasoning_model
4095
4284
  from agno.reasoning.groq import is_groq_reasoning_model
4096
4285
  from agno.reasoning.helpers import get_reasoning_agent
4097
4286
  from agno.reasoning.ollama import is_ollama_reasoning_model
4098
4287
  from agno.reasoning.openai import is_openai_reasoning_model
4288
+ from agno.reasoning.vertexai import is_vertexai_reasoning_model
4099
4289
 
4100
4290
  reasoning_agent = self.reasoning_agent or get_reasoning_agent(
4101
4291
  reasoning_model=reasoning_model,
@@ -4108,8 +4298,20 @@ class Team:
4108
4298
  is_openai = is_openai_reasoning_model(reasoning_model)
4109
4299
  is_ollama = is_ollama_reasoning_model(reasoning_model)
4110
4300
  is_ai_foundry = is_ai_foundry_reasoning_model(reasoning_model)
4301
+ is_gemini = is_gemini_reasoning_model(reasoning_model)
4302
+ is_anthropic = is_anthropic_reasoning_model(reasoning_model)
4303
+ is_vertexai = is_vertexai_reasoning_model(reasoning_model)
4111
4304
 
4112
- if is_deepseek or is_groq or is_openai or is_ollama or is_ai_foundry:
4305
+ if (
4306
+ is_deepseek
4307
+ or is_groq
4308
+ or is_openai
4309
+ or is_ollama
4310
+ or is_ai_foundry
4311
+ or is_gemini
4312
+ or is_anthropic
4313
+ or is_vertexai
4314
+ ):
4113
4315
  reasoning_message: Optional[Message] = None
4114
4316
  if is_deepseek:
4115
4317
  from agno.reasoning.deepseek import aget_deepseek_reasoning
@@ -4146,6 +4348,27 @@ class Team:
4146
4348
  reasoning_message = get_ai_foundry_reasoning(
4147
4349
  reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
4148
4350
  )
4351
+ elif is_gemini:
4352
+ from agno.reasoning.gemini import aget_gemini_reasoning
4353
+
4354
+ log_debug("Starting Gemini Reasoning", center=True, symbol="=")
4355
+ reasoning_message = await aget_gemini_reasoning(
4356
+ reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
4357
+ )
4358
+ elif is_anthropic:
4359
+ from agno.reasoning.anthropic import aget_anthropic_reasoning
4360
+
4361
+ log_debug("Starting Anthropic Claude Reasoning", center=True, symbol="=")
4362
+ reasoning_message = await aget_anthropic_reasoning(
4363
+ reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
4364
+ )
4365
+ elif is_vertexai:
4366
+ from agno.reasoning.vertexai import aget_vertexai_reasoning
4367
+
4368
+ log_debug("Starting VertexAI Reasoning", center=True, symbol="=")
4369
+ reasoning_message = await aget_vertexai_reasoning(
4370
+ reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
4371
+ )
4149
4372
 
4150
4373
  if reasoning_message is None:
4151
4374
  log_warning("Reasoning error. Reasoning response is None, continuing regular session...")
@@ -4715,7 +4938,7 @@ class Team:
4715
4938
  system_message_content += f"{indent * ' '} - Name: {member.name}\n"
4716
4939
  if member.role is not None:
4717
4940
  system_message_content += f"{indent * ' '} - Role: {member.role}\n"
4718
- if member.tools and self.add_member_tools_to_context:
4941
+ if member.tools is not None and member.tools != [] and self.add_member_tools_to_context:
4719
4942
  system_message_content += f"{indent * ' '} - Member tools:\n"
4720
4943
  for _tool in member.tools:
4721
4944
  if isinstance(_tool, Toolkit):
@@ -4921,9 +5144,7 @@ class Team:
4921
5144
  _memory_manager_not_set = True
4922
5145
  user_memories = self.memory_manager.get_user_memories(user_id=user_id) # type: ignore
4923
5146
  if user_memories and len(user_memories) > 0:
4924
- system_message_content += (
4925
- "You have access to memories from previous interactions with the user that you can use:\n\n"
4926
- )
5147
+ system_message_content += "You have access to user info and preferences from previous interactions that you can use to personalize your response:\n\n"
4927
5148
  system_message_content += "<memories_from_previous_interactions>"
4928
5149
  for _memory in user_memories: # type: ignore
4929
5150
  system_message_content += f"\n- {_memory.memory}"
@@ -5025,6 +5246,305 @@ class Team:
5025
5246
 
5026
5247
  return Message(role=self.system_message_role, content=system_message_content.strip())
5027
5248
 
5249
+ async def aget_system_message(
5250
+ self,
5251
+ session: TeamSession,
5252
+ session_state: Optional[Dict[str, Any]] = None,
5253
+ user_id: Optional[str] = None,
5254
+ audio: Optional[Sequence[Audio]] = None,
5255
+ images: Optional[Sequence[Image]] = None,
5256
+ videos: Optional[Sequence[Video]] = None,
5257
+ files: Optional[Sequence[File]] = None,
5258
+ dependencies: Optional[Dict[str, Any]] = None,
5259
+ metadata: Optional[Dict[str, Any]] = None,
5260
+ add_session_state_to_context: Optional[bool] = None,
5261
+ ) -> Optional[Message]:
5262
+ """Get the system message for the team."""
5263
+
5264
+ # 1. If the system_message is provided, use that.
5265
+ if self.system_message is not None:
5266
+ if isinstance(self.system_message, Message):
5267
+ return self.system_message
5268
+
5269
+ sys_message_content: str = ""
5270
+ if isinstance(self.system_message, str):
5271
+ sys_message_content = self.system_message
5272
+ elif callable(self.system_message):
5273
+ sys_message_content = self.system_message(agent=self)
5274
+ if not isinstance(sys_message_content, str):
5275
+ raise Exception("system_message must return a string")
5276
+
5277
+ # Format the system message with the session state variables
5278
+ if self.resolve_in_context:
5279
+ sys_message_content = self._format_message_with_state_variables(
5280
+ sys_message_content,
5281
+ user_id=user_id,
5282
+ session_state=session_state,
5283
+ dependencies=dependencies,
5284
+ metadata=metadata,
5285
+ )
5286
+
5287
+ # type: ignore
5288
+ return Message(role=self.system_message_role, content=sys_message_content)
5289
+
5290
+ # 1. Build and return the default system message for the Team.
5291
+ # 1.1 Build the list of instructions for the system message
5292
+ self.model = cast(Model, self.model)
5293
+ instructions: List[str] = []
5294
+ if self.instructions is not None:
5295
+ _instructions = self.instructions
5296
+ if callable(self.instructions):
5297
+ import inspect
5298
+
5299
+ signature = inspect.signature(self.instructions)
5300
+ if "team" in signature.parameters:
5301
+ _instructions = self.instructions(team=self)
5302
+ elif "agent" in signature.parameters:
5303
+ _instructions = self.instructions(agent=self)
5304
+ else:
5305
+ _instructions = self.instructions()
5306
+
5307
+ if isinstance(_instructions, str):
5308
+ instructions.append(_instructions)
5309
+ elif isinstance(_instructions, list):
5310
+ instructions.extend(_instructions)
5311
+
5312
+ # 1.2 Add instructions from the Model
5313
+ _model_instructions = self.model.get_instructions_for_model(self._tools_for_model)
5314
+ if _model_instructions is not None:
5315
+ instructions.extend(_model_instructions)
5316
+
5317
+ # 1.3 Build a list of additional information for the system message
5318
+ additional_information: List[str] = []
5319
+ # 1.3.1 Add instructions for using markdown
5320
+ if self.markdown and self.output_schema is None:
5321
+ additional_information.append("Use markdown to format your answers.")
5322
+ # 1.3.2 Add the current datetime
5323
+ if self.add_datetime_to_context:
5324
+ from datetime import datetime
5325
+
5326
+ tz = None
5327
+
5328
+ if self.timezone_identifier:
5329
+ try:
5330
+ from zoneinfo import ZoneInfo
5331
+
5332
+ tz = ZoneInfo(self.timezone_identifier)
5333
+ except Exception:
5334
+ log_warning("Invalid timezone identifier")
5335
+
5336
+ time = datetime.now(tz) if tz else datetime.now()
5337
+
5338
+ additional_information.append(f"The current time is {time}.")
5339
+
5340
+ # 1.3.3 Add the current location
5341
+ if self.add_location_to_context:
5342
+ from agno.utils.location import get_location
5343
+
5344
+ location = get_location()
5345
+ if location:
5346
+ location_str = ", ".join(
5347
+ filter(None, [location.get("city"), location.get("region"), location.get("country")])
5348
+ )
5349
+ if location_str:
5350
+ additional_information.append(f"Your approximate location is: {location_str}.")
5351
+
5352
+ # 1.3.4 Add team name if provided
5353
+ if self.name is not None and self.add_name_to_context:
5354
+ additional_information.append(f"Your name is: {self.name}.")
5355
+
5356
+ if self.knowledge is not None and self.enable_agentic_knowledge_filters:
5357
+ valid_filters = getattr(self.knowledge, "valid_metadata_filters", None)
5358
+ if valid_filters:
5359
+ valid_filters_str = ", ".join(valid_filters)
5360
+ additional_information.append(
5361
+ dedent(f"""
5362
+ The knowledge base contains documents with these metadata filters: {valid_filters_str}.
5363
+ Always use filters when the user query indicates specific metadata.
5364
+ Examples:
5365
+ 1. If the user asks about a specific person like "Jordan Mitchell", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like user_id>': '<valid value based on the user query>'}}.
5366
+ 2. If the user asks about a specific document type like "contracts", you MUST use the search_knowledge_base tool with the filters parameter set to {{'document_type': 'contract'}}.
5367
+ 4. If the user asks about a specific location like "documents from New York", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like location>': 'New York'}}.
5368
+ General Guidelines:
5369
+ - Always analyze the user query to identify relevant metadata.
5370
+ - Use the most specific filter(s) possible to narrow down results.
5371
+ - If multiple filters are relevant, combine them in the filters parameter (e.g., {{'name': 'Jordan Mitchell', 'document_type': 'contract'}}).
5372
+ - Ensure the filter keys match the valid metadata filters: {valid_filters_str}.
5373
+ You can use the search_knowledge_base tool to search the knowledge base and get the most relevant documents. Make sure to pass the filters as [Dict[str: Any]] to the tool. FOLLOW THIS STRUCTURE STRICTLY.
5374
+ """)
5375
+ )
5376
+
5377
+ # 2 Build the default system message for the Agent.
5378
+ system_message_content: str = ""
5379
+ system_message_content += "You are the leader of a team and sub-teams of AI Agents.\n"
5380
+ system_message_content += "Your task is to coordinate the team to complete the user's request.\n"
5381
+
5382
+ system_message_content += "\nHere are the members in your team:\n"
5383
+ system_message_content += "<team_members>\n"
5384
+ system_message_content += self.get_members_system_message_content()
5385
+ if self.get_member_information_tool:
5386
+ system_message_content += "If you need to get information about your team members, you can use the `get_member_information` tool at any time.\n"
5387
+ system_message_content += "</team_members>\n"
5388
+
5389
+ system_message_content += "\n<how_to_respond>\n"
5390
+
5391
+ if self.delegate_task_to_all_members:
5392
+ system_message_content += (
5393
+ "- Your role is to forward tasks to members in your team with the highest likelihood of completing the user's request.\n"
5394
+ "- You can either respond directly or use the `delegate_task_to_members` tool to delegate a task to all members in your team to get a collaborative response.\n"
5395
+ "- To delegate a task to all members in your team, call `delegate_task_to_members` ONLY once. This will delegate a task to all members in your team.\n"
5396
+ "- Analyze the responses from all members and evaluate whether the task has been completed.\n"
5397
+ "- If you feel the task has been completed, you can stop and respond to the user.\n"
5398
+ )
5399
+ else:
5400
+ system_message_content += (
5401
+ "- Your role is to delegate tasks to members in your team with the highest likelihood of completing the user's request.\n"
5402
+ "- Carefully analyze the tools available to the members and their roles before delegating tasks.\n"
5403
+ "- You cannot use a member tool directly. You can only delegate tasks to members.\n"
5404
+ "- When you delegate a task to another member, make sure to include:\n"
5405
+ " - member_id (str): The ID of the member to delegate the task to. Use only the ID of the member, not the ID of the team followed by the ID of the member.\n"
5406
+ " - task_description (str): A clear description of the task.\n"
5407
+ " - expected_output (str): The expected output.\n"
5408
+ "- You can delegate tasks to multiple members at once.\n"
5409
+ "- You must always analyze the responses from members before responding to the user.\n"
5410
+ "- After analyzing the responses from the members, if you feel the task has been completed, you can stop and respond to the user.\n"
5411
+ "- If you are not satisfied with the responses from the members, you should re-assign the task.\n"
5412
+ "- For simple greetings, thanks, or questions about the team itself, you should respond directly.\n"
5413
+ "- For all work requests, tasks, or questions requiring expertise, route to appropriate team members.\n"
5414
+ )
5415
+ system_message_content += "</how_to_respond>\n\n"
5416
+
5417
+ # Attached media
5418
+ if audio is not None or images is not None or videos is not None or files is not None:
5419
+ system_message_content += "<attached_media>\n"
5420
+ system_message_content += "You have the following media attached to your message:\n"
5421
+ if audio is not None and len(audio) > 0:
5422
+ system_message_content += " - Audio\n"
5423
+ if images is not None and len(images) > 0:
5424
+ system_message_content += " - Images\n"
5425
+ if videos is not None and len(videos) > 0:
5426
+ system_message_content += " - Videos\n"
5427
+ if files is not None and len(files) > 0:
5428
+ system_message_content += " - Files\n"
5429
+ system_message_content += "</attached_media>\n\n"
5430
+
5431
+ # Then add memories to the system prompt
5432
+ if self.add_memories_to_context:
5433
+ _memory_manager_not_set = False
5434
+ if not user_id:
5435
+ user_id = "default"
5436
+ if self.memory_manager is None:
5437
+ self._set_memory_manager()
5438
+ _memory_manager_not_set = True
5439
+
5440
+ if self._has_async_db():
5441
+ user_memories = await self.memory_manager.aget_user_memories(user_id=user_id) # type: ignore
5442
+ else:
5443
+ user_memories = self.memory_manager.get_user_memories(user_id=user_id) # type: ignore
5444
+
5445
+ if user_memories and len(user_memories) > 0:
5446
+ system_message_content += "You have access to user info and preferences from previous interactions that you can use to personalize your response:\n\n"
5447
+ system_message_content += "<memories_from_previous_interactions>"
5448
+ for _memory in user_memories: # type: ignore
5449
+ system_message_content += f"\n- {_memory.memory}"
5450
+ system_message_content += "\n</memories_from_previous_interactions>\n\n"
5451
+ system_message_content += (
5452
+ "Note: this information is from previous interactions and may be updated in this conversation. "
5453
+ "You should always prefer information from this conversation over the past memories.\n"
5454
+ )
5455
+ else:
5456
+ system_message_content += (
5457
+ "You have the capability to retain memories from previous interactions with the user, "
5458
+ "but have not had any interactions with the user yet.\n"
5459
+ )
5460
+ if _memory_manager_not_set:
5461
+ self.memory_manager = None
5462
+
5463
+ if self.enable_agentic_memory:
5464
+ system_message_content += (
5465
+ "\n<updating_user_memories>\n"
5466
+ "- You have access to the `update_user_memory` tool that you can use to add new memories, update existing memories, delete memories, or clear all memories.\n"
5467
+ "- If the user's message includes information that should be captured as a memory, use the `update_user_memory` tool to update your memory database.\n"
5468
+ "- Memories should include details that could personalize ongoing interactions with the user.\n"
5469
+ "- Use this tool to add new memories or update existing memories that you identify in the conversation.\n"
5470
+ "- Use this tool if the user asks to update their memory, delete a memory, or clear all memories.\n"
5471
+ "- If you use the `update_user_memory` tool, remember to pass on the response to the user.\n"
5472
+ "</updating_user_memories>\n\n"
5473
+ )
5474
+
5475
+ # Then add a summary of the interaction to the system prompt
5476
+ if self.add_session_summary_to_context and session.summary is not None:
5477
+ system_message_content += "Here is a brief summary of your previous interactions:\n\n"
5478
+ system_message_content += "<summary_of_previous_interactions>\n"
5479
+ system_message_content += session.summary.summary
5480
+ system_message_content += "\n</summary_of_previous_interactions>\n\n"
5481
+ system_message_content += (
5482
+ "Note: this information is from previous interactions and may be outdated. "
5483
+ "You should ALWAYS prefer information from this conversation over the past summary.\n\n"
5484
+ )
5485
+
5486
+ if self.description is not None:
5487
+ system_message_content += f"<description>\n{self.description}\n</description>\n\n"
5488
+
5489
+ if self.role is not None:
5490
+ system_message_content += f"\n<your_role>\n{self.role}\n</your_role>\n\n"
5491
+
5492
+ # 3.3.5 Add instructions for the Team
5493
+ if len(instructions) > 0:
5494
+ system_message_content += "<instructions>"
5495
+ if len(instructions) > 1:
5496
+ for _upi in instructions:
5497
+ system_message_content += f"\n- {_upi}"
5498
+ else:
5499
+ system_message_content += "\n" + instructions[0]
5500
+ system_message_content += "\n</instructions>\n\n"
5501
+ # 3.3.6 Add additional information
5502
+ if len(additional_information) > 0:
5503
+ system_message_content += "<additional_information>"
5504
+ for _ai in additional_information:
5505
+ system_message_content += f"\n- {_ai}"
5506
+ system_message_content += "\n</additional_information>\n\n"
5507
+ # 3.3.7 Add instructions provided by the tools
5508
+ if self._tool_instructions is not None:
5509
+ for _ti in self._tool_instructions:
5510
+ system_message_content += f"{_ti}\n"
5511
+
5512
+ # Format the system message with the session state variables
5513
+ if self.resolve_in_context:
5514
+ system_message_content = self._format_message_with_state_variables(
5515
+ system_message_content,
5516
+ user_id=user_id,
5517
+ session_state=session_state,
5518
+ dependencies=dependencies,
5519
+ metadata=metadata,
5520
+ )
5521
+
5522
+ system_message_from_model = self.model.get_system_message_for_model(self._tools_for_model)
5523
+ if system_message_from_model is not None:
5524
+ system_message_content += system_message_from_model
5525
+
5526
+ if self.expected_output is not None:
5527
+ system_message_content += f"<expected_output>\n{self.expected_output.strip()}\n</expected_output>\n\n"
5528
+
5529
+ if self.additional_context is not None:
5530
+ system_message_content += (
5531
+ f"<additional_context>\n{self.additional_context.strip()}\n</additional_context>\n\n"
5532
+ )
5533
+
5534
+ if self.add_session_state_to_context:
5535
+ system_message_content += f"<session_state>\n{session_state}\n</session_state>\n\n"
5536
+
5537
+ # Add the JSON output prompt if an output_schema is provided and JSON mode is used instead of native structured outputs
5538
+ if (
5539
+ self.output_schema is not None
5540
+ and self.use_json_mode
5541
+ and self.model
5542
+ and self.model.supports_native_structured_outputs
5543
+ ):
5544
+ system_message_content += f"{self._get_json_output_prompt()}"
5545
+
5546
+ return Message(role=self.system_message_role, content=system_message_content.strip())
5547
+
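The method above is straight string concatenation: a fixed leader preamble, the member list, and then a series of optional, flag-gated sections (memories, session summary, description, instructions, tool instructions, expected output). A minimal sketch of the same additive pattern, using illustrative names rather than agno's actual fields:

```python
# Minimal sketch of flag-gated prompt assembly; names are illustrative, not agno's API.
from typing import List, Optional


def build_leader_prompt(
    members_block: str,
    instructions: Optional[List[str]] = None,
    memories: Optional[List[str]] = None,
    expected_output: Optional[str] = None,
) -> str:
    content = "You are the leader of a team and sub-teams of AI Agents.\n"
    content += "Your task is to coordinate the team to complete the user's request.\n"

    # The member list is always present; every later section is opt-in.
    content += "\n<team_members>\n" + members_block + "</team_members>\n"

    if memories:
        content += "<memories_from_previous_interactions>"
        for memory in memories:
            content += f"\n- {memory}"
        content += "\n</memories_from_previous_interactions>\n"

    if instructions:
        content += "<instructions>"
        for instruction in instructions:
            content += f"\n- {instruction}"
        content += "\n</instructions>\n"

    if expected_output is not None:
        content += f"<expected_output>\n{expected_output.strip()}\n</expected_output>\n"

    return content.strip()


print(build_leader_prompt("- researcher: finds sources\n", instructions=["Cite every claim."]))
```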
5028
5548
  def _get_formatted_session_state_for_system_message(self, session_state: Dict[str, Any]) -> str:
5029
5549
  return f"\n<session_state>\n{session_state}\n</session_state>\n\n"
5030
5550
 
@@ -5163,6 +5683,134 @@ class Team:
5163
5683
 
5164
5684
  return run_messages
5165
5685
 
5686
+ async def _aget_run_messages(
5687
+ self,
5688
+ *,
5689
+ run_response: TeamRunOutput,
5690
+ session: TeamSession,
5691
+ session_state: Optional[Dict[str, Any]] = None,
5692
+ user_id: Optional[str] = None,
5693
+ input_message: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None,
5694
+ audio: Optional[Sequence[Audio]] = None,
5695
+ images: Optional[Sequence[Image]] = None,
5696
+ videos: Optional[Sequence[Video]] = None,
5697
+ files: Optional[Sequence[File]] = None,
5698
+ knowledge_filters: Optional[Dict[str, Any]] = None,
5699
+ add_history_to_context: Optional[bool] = None,
5700
+ dependencies: Optional[Dict[str, Any]] = None,
5701
+ add_dependencies_to_context: Optional[bool] = None,
5702
+ add_session_state_to_context: Optional[bool] = None,
5703
+ metadata: Optional[Dict[str, Any]] = None,
5704
+ **kwargs: Any,
5705
+ ) -> RunMessages:
5706
+ """This function returns a RunMessages object with the following attributes:
5707
+ - system_message: The system message for this run
5708
+ - user_message: The user message for this run
5709
+ - messages: List of messages to send to the model
5710
+
5711
+ To build the RunMessages object:
5712
+ 1. Add system message to run_messages
5713
+ 2. Add extra messages to run_messages
5714
+ 3. Add history to run_messages
5715
+ 4. Add the user message to run_messages (built from input_message)
5717
+
5718
+ """
5719
+ # Initialize the RunMessages object
5720
+ run_messages = RunMessages()
5721
+
5722
+ # 1. Add system message to run_messages
5723
+ system_message = await self.aget_system_message(
5724
+ session=session,
5725
+ session_state=session_state,
5726
+ user_id=user_id,
5727
+ images=images,
5728
+ audio=audio,
5729
+ videos=videos,
5730
+ files=files,
5731
+ dependencies=dependencies,
5732
+ metadata=metadata,
5733
+ add_session_state_to_context=add_session_state_to_context,
5734
+ )
5735
+ if system_message is not None:
5736
+ run_messages.system_message = system_message
5737
+ run_messages.messages.append(system_message)
5738
+
5739
+ # 2. Add extra messages to run_messages if provided
5740
+ if self.additional_input is not None:
5741
+ messages_to_add_to_run_response: List[Message] = []
5742
+ if run_messages.extra_messages is None:
5743
+ run_messages.extra_messages = []
5744
+
5745
+ for _m in self.additional_input:
5746
+ if isinstance(_m, Message):
5747
+ messages_to_add_to_run_response.append(_m)
5748
+ run_messages.messages.append(_m)
5749
+ run_messages.extra_messages.append(_m)
5750
+ elif isinstance(_m, dict):
5751
+ try:
5752
+ _m_parsed = Message.model_validate(_m)
5753
+ messages_to_add_to_run_response.append(_m_parsed)
5754
+ run_messages.messages.append(_m_parsed)
5755
+ run_messages.extra_messages.append(_m_parsed)
5756
+ except Exception as e:
5757
+ log_warning(f"Failed to validate message: {e}")
5758
+ # Add the extra messages to the run_response
5759
+ if len(messages_to_add_to_run_response) > 0:
5760
+ log_debug(f"Adding {len(messages_to_add_to_run_response)} extra messages")
5761
+ if run_response.additional_input is None:
5762
+ run_response.additional_input = messages_to_add_to_run_response
5763
+ else:
5764
+ run_response.additional_input.extend(messages_to_add_to_run_response)
5765
+
5766
+ # 3. Add history to run_messages
5767
+ if add_history_to_context:
5768
+ from copy import deepcopy
5769
+
5770
+ history = session.get_messages_from_last_n_runs(
5771
+ last_n=self.num_history_runs,
5772
+ skip_role=self.system_message_role,
5773
+ team_id=self.id,
5774
+ )
5775
+
5776
+ if len(history) > 0:
5777
+ # Create a deep copy of the history messages to avoid modifying the original messages
5778
+ history_copy = [deepcopy(msg) for msg in history]
5779
+
5780
+ # Tag each message as coming from history
5781
+ for _msg in history_copy:
5782
+ _msg.from_history = True
5783
+
5784
+ log_debug(f"Adding {len(history_copy)} messages from history")
5785
+
5786
+ # Extend the messages with the history
5787
+ run_messages.messages += history_copy
5788
+
5789
+ # 4. Add the user message to run_messages
5790
+ user_message: Optional[Message] = None
5791
+ # 4.1 Build the user message from the input
5792
+ user_message = self._get_user_message(
5793
+ run_response=run_response,
5794
+ session_state=session_state,
5795
+ input_message=input_message,
5796
+ user_id=user_id,
5797
+ audio=audio,
5798
+ images=images,
5799
+ videos=videos,
5800
+ files=files,
5801
+ knowledge_filters=knowledge_filters,
5802
+ dependencies=dependencies,
5803
+ add_dependencies_to_context=add_dependencies_to_context,
5804
+ metadata=metadata,
5805
+ **kwargs,
5806
+ )
5807
+ # Add user message to run_messages
5808
+ if user_message is not None:
5809
+ run_messages.user_message = user_message
5810
+ run_messages.messages.append(user_message)
5811
+
5812
+ return run_messages
5813
+
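The method builds the model input in a fixed order: system message first, then any `additional_input`, then a deep-copied history tagged `from_history`, and finally the user message. A rough sketch of that ordering with plain dataclasses (illustrative types, not agno's `RunMessages`):

```python
# Rough sketch of the message ordering produced above (illustrative types, not agno's).
from dataclasses import dataclass, field, replace
from typing import List, Optional


@dataclass
class Msg:
    role: str
    content: str
    from_history: bool = False


@dataclass
class RunMsgs:
    system_message: Optional[Msg] = None
    user_message: Optional[Msg] = None
    messages: List[Msg] = field(default_factory=list)


def assemble(system: Msg, extra: List[Msg], history: List[Msg], user: Msg) -> RunMsgs:
    run = RunMsgs(system_message=system, user_message=user)
    run.messages.append(system)                      # 1. system message
    run.messages.extend(extra)                       # 2. additional input
    run.messages.extend(                             # 3. history, copied and tagged
        replace(m, from_history=True) for m in history
    )
    run.messages.append(user)                        # 4. user message
    return run


msgs = assemble(
    Msg("system", "You are the team leader."),
    extra=[],
    history=[Msg("user", "earlier question"), Msg("assistant", "earlier answer")],
    user=Msg("user", "new question"),
)
print([(m.role, m.from_history) for m in msgs.messages])
```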
5166
5814
  def _get_user_message(
5167
5815
  self,
5168
5816
  *,
@@ -5662,6 +6310,7 @@ class Team:
5662
6310
  if self.db is None:
5663
6311
  return "Previous session messages not available"
5664
6312
 
6313
+ self.db = cast(BaseDb, self.db)
5665
6314
  selected_sessions = self.db.get_sessions(
5666
6315
  session_type=SessionType.TEAM,
5667
6316
  limit=num_history_sessions,
@@ -5701,7 +6350,62 @@ class Team:
5701
6350
 
5702
6351
  return json.dumps([msg.to_dict() for msg in all_messages]) if all_messages else "No history found"
5703
6352
 
5704
- return get_previous_session_messages
6353
+ async def aget_previous_session_messages() -> str:
6354
+ """Use this function to retrieve messages from previous chat sessions.
6355
+ USE THIS TOOL ONLY FOR QUESTIONS LIKE "What was my last conversation?" or "What was my last question?".
6356
+
6357
+ Returns:
6358
+ str: JSON formatted list of message pairs from previous sessions
6359
+ """
6360
+ import json
6361
+
6362
+ if self.db is None:
6363
+ return "Previous session messages not available"
6364
+
6365
+ self.db = cast(AsyncBaseDb, self.db)
6366
+ selected_sessions = await self.db.get_sessions(
6367
+ session_type=SessionType.TEAM,
6368
+ limit=num_history_sessions,
6369
+ user_id=user_id,
6370
+ sort_by="created_at",
6371
+ sort_order="desc",
6372
+ )
6373
+
6374
+ all_messages = []
6375
+ seen_message_pairs = set()
6376
+
6377
+ for session in selected_sessions:
6378
+ if isinstance(session, TeamSession) and session.runs:
6379
+ message_count = 0
6380
+ for run in session.runs:
6381
+ messages = run.messages
6382
+ if messages is not None:
6383
+ for i in range(0, len(messages) - 1, 2):
6384
+ if i + 1 < len(messages):
6385
+ try:
6386
+ user_msg = messages[i]
6387
+ assistant_msg = messages[i + 1]
6388
+ user_content = user_msg.content
6389
+ assistant_content = assistant_msg.content
6390
+ if user_content is None or assistant_content is None:
6391
+ continue # Skip this pair if either message has no content
6392
+
6393
+ msg_pair_id = f"{user_content}:{assistant_content}"
6394
+ if msg_pair_id not in seen_message_pairs:
6395
+ seen_message_pairs.add(msg_pair_id)
6396
+ all_messages.append(Message.model_validate(user_msg))
6397
+ all_messages.append(Message.model_validate(assistant_msg))
6398
+ message_count += 1
6399
+ except Exception as e:
6400
+ log_warning(f"Error processing message pair: {e}")
6401
+ continue
6402
+
6403
+ return json.dumps([msg.to_dict() for msg in all_messages]) if all_messages else "No history found"
6404
+
6405
+ if self._has_async_db():
6406
+ return aget_previous_session_messages
6407
+ else:
6408
+ return get_previous_session_messages
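Because the factory now returns either the sync or the async closure depending on whether the configured database is asynchronous, the tool keeps one name and docstring while matching the db interface. A condensed, self-contained sketch of that selection pattern (illustrative classes, not agno's db types):

```python
# Condensed sketch of a factory that returns a sync or async tool depending on the db
# (illustrative only; the real closures also deduplicate and serialize message pairs).
import asyncio
from typing import Awaitable, Callable, List, Union


class SyncDb:
    def get_sessions(self) -> List[str]:
        return ["session-1", "session-2"]


class AsyncDb:
    async def get_sessions(self) -> List[str]:
        return ["session-1", "session-2"]


def make_history_tool(db) -> Union[Callable[[], str], Callable[[], Awaitable[str]]]:
    """Return a sync or async tool with the same name and docstring, matching the db."""

    def get_previous_session_messages() -> str:
        """Retrieve messages from previous chat sessions."""
        return ", ".join(db.get_sessions())

    async def aget_previous_session_messages() -> str:
        """Retrieve messages from previous chat sessions."""
        return ", ".join(await db.get_sessions())

    is_async_db = asyncio.iscoroutinefunction(db.get_sessions)
    return aget_previous_session_messages if is_async_db else get_previous_session_messages


print(make_history_tool(SyncDb())())                 # sync path
print(asyncio.run(make_history_tool(AsyncDb())()))   # async path
```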
5705
6409
 
5706
6410
  def _get_history_for_member_agent(self, session: TeamSession, member_agent: Union[Agent, "Team"]) -> List[Message]:
5707
6411
  from copy import deepcopy
@@ -5894,8 +6598,16 @@ class Team:
5894
6598
  if run_response and member_agent_run_response:
5895
6599
  run_response.add_member_run(member_agent_run_response)
5896
6600
 
5897
- # Add the member run to the team session
6601
+ # Scrub the member run based on that member's storage flags before storing
5898
6602
  if member_agent_run_response:
6603
+ if (
6604
+ not member_agent.store_media
6605
+ or not member_agent.store_tool_messages
6606
+ or not member_agent.store_history_messages
6607
+ ):
6608
+ member_agent._scrub_run_output_for_storage(member_agent_run_response) # type: ignore
6609
+
6610
+ # Add the member run to the team session
5899
6611
  session.upsert_run(member_agent_run_response)
5900
6612
 
5901
6613
  # Update team session state
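Scrubbing is now applied per member before the run is stored, so a member configured with, say, `store_media=False` has its media stripped even when the team itself keeps everything. A hedged sketch of what such a scrub step could look like; the field and flag names below are assumptions for illustration, not the actual `_scrub_run_output_for_storage` implementation:

```python
# Hypothetical sketch of per-member scrubbing before storage; field and flag names are
# assumptions for illustration, not the actual agno implementation.
from dataclasses import dataclass, field
from typing import Any, List


@dataclass
class MemberRunOutput:
    messages: List[Any] = field(default_factory=list)
    images: List[Any] = field(default_factory=list)
    audio: List[Any] = field(default_factory=list)
    videos: List[Any] = field(default_factory=list)


def scrub_for_storage(run: MemberRunOutput, *, store_media: bool, store_tool_messages: bool) -> None:
    # Drop attached media when the member opted out of storing it.
    if not store_media:
        run.images.clear()
        run.audio.clear()
        run.videos.clear()
    # Drop tool messages when the member opted out of storing them.
    if not store_tool_messages:
        run.messages = [m for m in run.messages if getattr(m, "role", None) != "tool"]


run = MemberRunOutput(images=["img-bytes"])
scrub_for_storage(run, store_media=False, store_tool_messages=True)
print(run.images)  # []
```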
@@ -6287,7 +6999,7 @@ class Team:
6287
6999
  done_marker = object()
6288
7000
  queue: "asyncio.Queue[Union[RunOutputEvent, TeamRunOutputEvent, str, object]]" = asyncio.Queue()
6289
7001
 
6290
- async def stream_member(agent: Union[Agent, "Team"], idx: int) -> None:
7002
+ async def stream_member(agent: Union[Agent, "Team"]) -> None:
6291
7003
  member_agent_task, history = _setup_delegate_task_to_member(
6292
7004
  agent, task_description, expected_output
6293
7005
  )
@@ -6335,11 +7047,10 @@ class Team:
6335
7047
 
6336
7048
  # Initialize and launch all members
6337
7049
  tasks: List[asyncio.Task[None]] = []
6338
- for member_agent_index, member_agent in enumerate(self.members):
7050
+ for member_agent in self.members:
6339
7051
  current_agent = member_agent
6340
- current_index = member_agent_index
6341
7052
  self._initialize_member(current_agent)
6342
- tasks.append(asyncio.create_task(stream_member(current_agent, current_index)))
7053
+ tasks.append(asyncio.create_task(stream_member(current_agent)))
6343
7054
 
6344
7055
  # Drain queue until all members reported done
6345
7056
  completed = 0
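Dropping the unused index leaves the core pattern unchanged: every member streams into a shared `asyncio.Queue` and posts a sentinel when finished, and the team drains the queue until it has counted one sentinel per member. A self-contained sketch of that fan-out pattern in plain asyncio (not the agno-specific event types):

```python
# Self-contained sketch of the fan-out pattern used above: N producers stream into one
# queue and each posts a sentinel; the consumer drains until all sentinels are seen.
import asyncio
from typing import Union

DONE = object()


async def stream_member(name: str, queue: "asyncio.Queue[Union[str, object]]") -> None:
    try:
        for i in range(3):
            await queue.put(f"{name}: chunk {i}")
            await asyncio.sleep(0)          # yield control, simulating streaming
    finally:
        await queue.put(DONE)               # always signal completion


async def main() -> None:
    queue: "asyncio.Queue[Union[str, object]]" = asyncio.Queue()
    members = ["researcher", "writer"]
    tasks = [asyncio.create_task(stream_member(m, queue)) for m in members]

    completed = 0
    while completed < len(members):
        item = await queue.get()
        if item is DONE:
            completed += 1
        else:
            print(item)

    await asyncio.gather(*tasks)


asyncio.run(main())
```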
@@ -6364,7 +7075,6 @@ class Team:
6364
7075
  tasks = []
6365
7076
  for member_agent_index, member_agent in enumerate(self.members):
6366
7077
  current_agent = member_agent
6367
- current_index = member_agent_index
6368
7078
  member_agent_task, history = _setup_delegate_task_to_member(
6369
7079
  current_agent, task_description, expected_output
6370
7080
  )
@@ -6467,6 +7177,18 @@ class Team:
6467
7177
  log_warning(f"Error getting session from db: {e}")
6468
7178
  return None
6469
7179
 
7180
+ async def _aread_session(self, session_id: str) -> Optional[TeamSession]:
7181
+ """Get a Session from the database."""
7182
+ try:
7183
+ if not self.db:
7184
+ raise ValueError("Db not initialized")
7185
+ self.db = cast(AsyncBaseDb, self.db)
7186
+ session = await self.db.get_session(session_id=session_id, session_type=SessionType.TEAM)
7187
+ return session # type: ignore
7188
+ except Exception as e:
7189
+ log_warning(f"Error getting session from db: {e}")
7190
+ return None
7191
+
6470
7192
  def _upsert_session(self, session: TeamSession) -> Optional[TeamSession]:
6471
7193
  """Upsert a Session into the database."""
6472
7194
 
@@ -6478,6 +7200,17 @@ class Team:
6478
7200
  log_warning(f"Error upserting session into db: {e}")
6479
7201
  return None
6480
7202
 
7203
+ async def _aupsert_session(self, session: TeamSession) -> Optional[TeamSession]:
7204
+ """Upsert a Session into the database."""
7205
+
7206
+ try:
7207
+ if not self.db:
7208
+ raise ValueError("Db not initialized")
7209
+ return await self.db.upsert_session(session=session) # type: ignore
7210
+ except Exception as e:
7211
+ log_warning(f"Error upserting session into db: {e}")
7212
+ return None
7213
+
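Both async accessors keep the sync helpers' contract: any storage failure is logged as a warning and surfaces as `None`, so callers can fall back to creating a fresh session. A generic sketch of that pattern with illustrative names (not the actual agno classes):

```python
# Generic sketch of the "warn and return None" async wrapper pattern used above
# (illustrative names; not the actual agno classes).
import asyncio
import logging
from typing import Optional

logging.basicConfig(level=logging.WARNING)
log = logging.getLogger(__name__)


class FlakyDb:
    async def get_session(self, session_id: str) -> dict:
        raise ConnectionError("database unreachable")


async def read_session(db: Optional[FlakyDb], session_id: str) -> Optional[dict]:
    try:
        if not db:
            raise ValueError("Db not initialized")
        return await db.get_session(session_id=session_id)
    except Exception as e:
        log.warning(f"Error getting session from db: {e}")
        return None


print(asyncio.run(read_session(FlakyDb(), "sess-123")))  # -> None, with a warning logged
```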
6481
7214
  def get_run_output(
6482
7215
  self, run_id: str, session_id: Optional[str] = None
6483
7216
  ) -> Optional[Union[TeamRunOutput, RunOutput]]:
@@ -6567,6 +7300,47 @@ class Team:
6567
7300
 
6568
7301
  return team_session
6569
7302
 
7303
+ async def _aread_or_create_session(self, session_id: str, user_id: Optional[str] = None) -> TeamSession:
7304
+ """Load the TeamSession from storage
7305
+
7306
+ Returns:
7307
+ TeamSession: The existing TeamSession, or a newly created one if none was found.
7308
+ """
7309
+ from time import time
7310
+
7311
+ from agno.session.team import TeamSession
7312
+
7313
+ # Return existing session if we have one
7314
+ if self._team_session is not None and self._team_session.session_id == session_id:
7315
+ return self._team_session
7316
+
7317
+ # Try to load from database
7318
+ team_session = None
7319
+ if self.db is not None and self.parent_team_id is None and self.workflow_id is None:
7320
+ if self._has_async_db():
7321
+ team_session = cast(TeamSession, await self._aread_session(session_id=session_id))
7322
+ else:
7323
+ team_session = cast(TeamSession, self._read_session(session_id=session_id))
7324
+
7325
+ # Create new session if none found
7326
+ if team_session is None:
7327
+ log_debug(f"Creating new TeamSession: {session_id}")
7328
+ team_session = TeamSession(
7329
+ session_id=session_id,
7330
+ team_id=self.id,
7331
+ user_id=user_id,
7332
+ team_data=self._get_team_data(),
7333
+ session_data={},
7334
+ metadata=self.metadata,
7335
+ created_at=int(time()),
7336
+ )
7337
+
7338
+ # Cache the session if relevant
7339
+ if team_session is not None and self.cache_session:
7340
+ self._team_session = team_session
7341
+
7342
+ return team_session
7343
+
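The lookup order is: in-memory cached session, then the database (via the async or sync reader, depending on the configured db), and only then a newly constructed session, which is cached when `cache_session` is enabled. A condensed sketch of that read-or-create flow (illustrative classes, not agno's):

```python
# Condensed sketch of the cached read-or-create flow (illustrative, not agno's classes).
import time
from dataclasses import dataclass
from typing import Dict, Optional


@dataclass
class Session:
    session_id: str
    created_at: int


class SessionStore:
    def __init__(self, cache_session: bool = True) -> None:
        self.cache_session = cache_session
        self._cached: Optional[Session] = None
        self._db: Dict[str, Session] = {}

    def read_or_create(self, session_id: str) -> Session:
        # 1. Reuse the cached session when it matches.
        if self._cached is not None and self._cached.session_id == session_id:
            return self._cached
        # 2. Fall back to the database; 3. otherwise create a new session.
        session = self._db.get(session_id) or Session(session_id, created_at=int(time.time()))
        if self.cache_session:
            self._cached = session
        return session


print(SessionStore().read_or_create("sess-1"))
```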
6570
7344
  def get_session(
6571
7345
  self,
6572
7346
  session_id: Optional[str] = None,
@@ -6610,14 +7384,29 @@ class Team:
6610
7384
  session.session_data["session_state"].pop("current_user_id", None) # type: ignore
6611
7385
  session.session_data["session_state"].pop("current_run_id", None) # type: ignore
6612
7386
 
6613
- # scrub the member responses if not storing them
6614
- if not self.store_member_responses and session.runs is not None:
7387
+ # scrub the member responses based on storage settings
7388
+ if session.runs is not None:
6615
7389
  for run in session.runs:
6616
7390
  if hasattr(run, "member_responses"):
6617
- run.member_responses = []
7391
+ if not self.store_member_responses:
7392
+ # Remove all member responses
7393
+ run.member_responses = []
7394
+ else:
7395
+ # Scrub individual member responses based on their storage flags
7396
+ self._scrub_member_responses(run.member_responses)
6618
7397
  self._upsert_session(session=session)
6619
7398
  log_debug(f"Created or updated TeamSession record: {session.session_id}")
6620
7399
 
7400
+ async def asave_session(self, session: TeamSession) -> None:
7401
+ """Save the TeamSession to storage"""
7402
+ if self.db is not None and self.parent_team_id is None and self.workflow_id is None:
7403
+ if session.session_data is not None and "session_state" in session.session_data:
7404
+ session.session_data["session_state"].pop("current_session_id", None) # type: ignore
7405
+ session.session_data["session_state"].pop("current_user_id", None) # type: ignore
7406
+ session.session_data["session_state"].pop("current_run_id", None) # type: ignore
7407
+ await self._aupsert_session(session=session)
7408
+ log_debug(f"Created or updated TeamSession record: {session.session_id}")
7409
+
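As in the sync `save_session`, the transient per-run keys are popped from `session_state` before the upsert so only durable state is persisted. A small sketch of that cleanup step (the session shape is simplified; the key names match the code above):

```python
# Small sketch of stripping transient runtime keys before persisting session state
# (session shape simplified; key names match the code above).
from typing import Any, Dict

TRANSIENT_KEYS = ("current_session_id", "current_user_id", "current_run_id")


def clean_session_state(session_data: Dict[str, Any]) -> Dict[str, Any]:
    state = session_data.get("session_state")
    if isinstance(state, dict):
        for key in TRANSIENT_KEYS:
            state.pop(key, None)
    return session_data


print(clean_session_state({"session_state": {"current_run_id": "r1", "theme": "dark"}}))
# {'session_state': {'theme': 'dark'}}
```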
6621
7410
  def _load_session_state(self, session: TeamSession, session_state: Dict[str, Any]) -> Dict[str, Any]:
6622
7411
  """Load and return the stored session_state from the database, optionally merging it with the given one"""
6623
7412