agno 2.1.4__py3-none-any.whl → 2.1.5__py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in a supported public registry. It is provided for informational purposes only.
Files changed (88)
  1. agno/agent/agent.py +1767 -535
  2. agno/db/async_postgres/__init__.py +3 -0
  3. agno/db/async_postgres/async_postgres.py +1668 -0
  4. agno/db/async_postgres/schemas.py +124 -0
  5. agno/db/async_postgres/utils.py +289 -0
  6. agno/db/base.py +237 -2
  7. agno/db/dynamo/dynamo.py +2 -2
  8. agno/db/firestore/firestore.py +2 -2
  9. agno/db/firestore/utils.py +4 -2
  10. agno/db/gcs_json/gcs_json_db.py +2 -2
  11. agno/db/in_memory/in_memory_db.py +2 -2
  12. agno/db/json/json_db.py +2 -2
  13. agno/db/migrations/v1_to_v2.py +30 -13
  14. agno/db/mongo/mongo.py +18 -6
  15. agno/db/mysql/mysql.py +35 -13
  16. agno/db/postgres/postgres.py +29 -6
  17. agno/db/redis/redis.py +2 -2
  18. agno/db/singlestore/singlestore.py +2 -2
  19. agno/db/sqlite/sqlite.py +34 -12
  20. agno/db/sqlite/utils.py +8 -3
  21. agno/eval/accuracy.py +50 -43
  22. agno/eval/performance.py +6 -3
  23. agno/eval/reliability.py +6 -3
  24. agno/eval/utils.py +33 -16
  25. agno/exceptions.py +8 -2
  26. agno/knowledge/knowledge.py +260 -46
  27. agno/knowledge/reader/pdf_reader.py +4 -6
  28. agno/knowledge/reader/reader_factory.py +2 -3
  29. agno/memory/manager.py +241 -33
  30. agno/models/anthropic/claude.py +37 -0
  31. agno/os/app.py +8 -7
  32. agno/os/interfaces/a2a/router.py +3 -5
  33. agno/os/interfaces/agui/router.py +4 -1
  34. agno/os/interfaces/agui/utils.py +27 -6
  35. agno/os/interfaces/slack/router.py +2 -4
  36. agno/os/mcp.py +98 -41
  37. agno/os/router.py +23 -0
  38. agno/os/routers/evals/evals.py +52 -20
  39. agno/os/routers/evals/utils.py +14 -14
  40. agno/os/routers/knowledge/knowledge.py +130 -9
  41. agno/os/routers/knowledge/schemas.py +57 -0
  42. agno/os/routers/memory/memory.py +116 -44
  43. agno/os/routers/metrics/metrics.py +16 -6
  44. agno/os/routers/session/session.py +65 -22
  45. agno/os/schema.py +36 -0
  46. agno/os/utils.py +67 -12
  47. agno/reasoning/anthropic.py +80 -0
  48. agno/reasoning/gemini.py +73 -0
  49. agno/reasoning/openai.py +5 -0
  50. agno/reasoning/vertexai.py +76 -0
  51. agno/session/workflow.py +3 -3
  52. agno/team/team.py +918 -175
  53. agno/tools/googlesheets.py +20 -5
  54. agno/tools/mcp_toolbox.py +3 -3
  55. agno/tools/scrapegraph.py +1 -1
  56. agno/utils/models/claude.py +3 -1
  57. agno/utils/streamlit.py +1 -1
  58. agno/vectordb/base.py +22 -1
  59. agno/vectordb/cassandra/cassandra.py +9 -0
  60. agno/vectordb/chroma/chromadb.py +26 -6
  61. agno/vectordb/clickhouse/clickhousedb.py +9 -1
  62. agno/vectordb/couchbase/couchbase.py +11 -0
  63. agno/vectordb/lancedb/lance_db.py +20 -0
  64. agno/vectordb/langchaindb/langchaindb.py +11 -0
  65. agno/vectordb/lightrag/lightrag.py +9 -0
  66. agno/vectordb/llamaindex/llamaindexdb.py +15 -1
  67. agno/vectordb/milvus/milvus.py +23 -0
  68. agno/vectordb/mongodb/mongodb.py +22 -0
  69. agno/vectordb/pgvector/pgvector.py +19 -0
  70. agno/vectordb/pineconedb/pineconedb.py +35 -4
  71. agno/vectordb/qdrant/qdrant.py +24 -0
  72. agno/vectordb/singlestore/singlestore.py +25 -17
  73. agno/vectordb/surrealdb/surrealdb.py +18 -1
  74. agno/vectordb/upstashdb/upstashdb.py +26 -1
  75. agno/vectordb/weaviate/weaviate.py +18 -0
  76. agno/workflow/condition.py +4 -0
  77. agno/workflow/loop.py +4 -0
  78. agno/workflow/parallel.py +4 -0
  79. agno/workflow/router.py +4 -0
  80. agno/workflow/step.py +22 -14
  81. agno/workflow/steps.py +4 -0
  82. agno/workflow/types.py +2 -2
  83. agno/workflow/workflow.py +328 -61
  84. {agno-2.1.4.dist-info → agno-2.1.5.dist-info}/METADATA +100 -41
  85. {agno-2.1.4.dist-info → agno-2.1.5.dist-info}/RECORD +88 -81
  86. {agno-2.1.4.dist-info → agno-2.1.5.dist-info}/WHEEL +0 -0
  87. {agno-2.1.4.dist-info → agno-2.1.5.dist-info}/licenses/LICENSE +0 -0
  88. {agno-2.1.4.dist-info → agno-2.1.5.dist-info}/top_level.txt +0 -0
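
The headline change in this release is first-class async database support: a new agno/db/async_postgres backend, an AsyncBaseDb base class in agno/db/base.py, and async-aware session handling in Agent, Team, and Workflow. A minimal sketch of how the new backend might plug into a Team, assuming the class exported by agno.db.async_postgres is named AsyncPostgresDb and takes a db_url argument (both assumptions; check the new module for the exact constructor):

import asyncio

from agno.db.async_postgres import AsyncPostgresDb  # assumed export name
from agno.models.openai import OpenAIChat
from agno.team import Team

# An async DB only works with the async entry points (arun and friends);
# the sync run() now raises and points you at arun() instead.
db = AsyncPostgresDb(db_url="postgresql+asyncpg://ai:ai@localhost:5532/ai")  # assumed signature
team = Team(members=[], model=OpenAIChat(id="gpt-4o"), db=db)

async def main() -> None:
    output = await team.arun("Summarize what changed between 2.1.4 and 2.1.5")
    print(output.content)

asyncio.run(main())
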
agno/team/team.py CHANGED
@@ -30,7 +30,7 @@ from uuid import uuid4
  from pydantic import BaseModel

  from agno.agent import Agent
- from agno.db.base import BaseDb, SessionType, UserMemory
+ from agno.db.base import AsyncBaseDb, BaseDb, SessionType, UserMemory
  from agno.exceptions import (
  InputCheckError,
  ModelProviderError,
@@ -214,7 +214,7 @@ class Team:

  # --- Database ---
  # Database to use for this agent
- db: Optional[BaseDb] = None
+ db: Optional[Union[BaseDb, AsyncBaseDb]] = None

  # Memory manager to use for this agent
  memory_manager: Optional[MemoryManager] = None
@@ -259,6 +259,10 @@ class Team:
  send_media_to_model: bool = True
  # If True, store media in run output
  store_media: bool = True
+ # If True, store tool results in run output
+ store_tool_results: bool = True
+ # If True, store history messages in run output
+ store_history_messages: bool = True

  # --- Team Tools ---
  # A list of tools provided to the Model.
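
The two new storage flags sit alongside store_media and control what is persisted with the session: when either is disabled, the corresponding data is scrubbed from the stored run output right before the session is saved (the matching constructor parameters follow in the next hunk, and the scrubbing helpers are added further down in this file). A short sketch of a team configured to keep its stored sessions lean, assuming the synchronous PostgresDb backend accepts a db_url:

from agno.db.postgres import PostgresDb
from agno.models.openai import OpenAIChat
from agno.team import Team

# Persist the final answer, but drop tool results and replayed history
# messages from the stored copy of each run.
team = Team(
    members=[],
    model=OpenAIChat(id="gpt-4o"),
    db=PostgresDb(db_url="postgresql+psycopg://ai:ai@localhost:5532/ai"),
    store_tool_results=False,
    store_history_messages=False,
)
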
@@ -418,6 +422,8 @@ class Team:
  search_knowledge: bool = True,
  read_team_history: bool = False,
  store_media: bool = True,
+ store_tool_results: bool = True,
+ store_history_messages: bool = True,
  send_media_to_model: bool = True,
  tools: Optional[List[Union[Toolkit, Callable, Function, Dict]]] = None,
  tool_call_limit: Optional[int] = None,
@@ -433,7 +439,7 @@ class Team:
  output_model_prompt: Optional[str] = None,
  use_json_mode: bool = False,
  parse_response: bool = True,
- db: Optional[BaseDb] = None,
+ db: Optional[Union[BaseDb, AsyncBaseDb]] = None,
  enable_agentic_memory: bool = False,
  enable_user_memories: bool = False,
  add_memories_to_context: Optional[bool] = None,
@@ -518,6 +524,8 @@ class Team:
  self.read_team_history = read_team_history

  self.store_media = store_media
+ self.store_tool_results = store_tool_results
+ self.store_history_messages = store_history_messages
  self.send_media_to_model = send_media_to_model

  self.tools = tools
@@ -792,6 +800,10 @@ class Team:

  return session_id, user_id, session_state # type: ignore

+ def _has_async_db(self) -> bool:
+ """Return True if the db the team is equipped with is an Async implementation"""
+ return self.db is not None and isinstance(self.db, AsyncBaseDb)
+
  def initialize_team(self, debug_mode: Optional[bool] = None) -> None:
  # Make sure for the team, we are using the team logger
  use_team_logger()
@@ -1205,7 +1217,11 @@ class Team:
  )
  deque(response_iterator, maxlen=0)

- # 10. Save session to storage
+ # 10. Scrub the stored run based on storage flags
+ if self._scrub_run_output_for_storage(run_response):
+ session.upsert_run(run_response=run_response)
+
+ # 11. Save session to storage
  self.save_session(session=session)

  # Log Team Telemetry
@@ -1410,7 +1426,11 @@ class Team:
  # 8. Calculate session metrics
  self._update_session_metrics(session=session)

- # 9. Save session to storage
+ # 9. Scrub the stored run based on storage flags
+ if self._scrub_run_output_for_storage(run_response):
+ session.upsert_run(run_response=run_response)
+
+ # 10. Save session to storage
  self.save_session(session=session)

  if stream_intermediate_steps:
@@ -1519,6 +1539,8 @@ class Team:
  **kwargs: Any,
  ) -> Union[TeamRunOutput, Iterator[Union[RunOutputEvent, TeamRunOutputEvent]]]:
  """Run the Team and return the response."""
+ if self._has_async_db():
+ raise Exception("run() is not supported with an async DB. Please use arun() instead.")

  # Create a run_id for this specific run
  run_id = str(uuid4())
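
The same guard appears at the other synchronous entry points touched in this diff. A rough sketch of the behaviour, reusing an async-DB team like the one in the first example (the exception type and message come from this hunk; everything else is an assumption):

from agno.team import Team

async def demo(team: Team) -> None:
    # With an async DB attached, the sync entry point refuses to run...
    try:
        team.run("hello")
    except Exception as e:
        print(e)  # run() is not supported with an async DB. Please use arun() instead.
    # ...so the async entry point is the supported path.
    output = await team.arun("hello")
    print(output.content)
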
@@ -1736,46 +1758,71 @@ class Team:
1736
1758
 
1737
1759
  async def _arun(
1738
1760
  self,
1761
+ input: Union[str, List, Dict, Message, BaseModel],
1739
1762
  run_response: TeamRunOutput,
1740
- session: TeamSession,
1741
- session_state: Dict[str, Any],
1763
+ session_id: str,
1764
+ session_state: Optional[Dict[str, Any]] = None,
1765
+ store_member_responses: Optional[bool] = None,
1742
1766
  user_id: Optional[str] = None,
1743
- knowledge_filters: Optional[Dict[str, Any]] = None,
1744
- add_history_to_context: Optional[bool] = None,
1767
+ response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
1745
1768
  add_dependencies_to_context: Optional[bool] = None,
1746
1769
  add_session_state_to_context: Optional[bool] = None,
1770
+ add_history_to_context: Optional[bool] = None,
1771
+ knowledge_filters: Optional[Dict[str, Any]] = None,
1747
1772
  metadata: Optional[Dict[str, Any]] = None,
1748
- response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
1749
- dependencies: Optional[Dict[str, Any]] = None,
1773
+ audio: Optional[Sequence[Audio]] = None,
1774
+ images: Optional[Sequence[Image]] = None,
1775
+ videos: Optional[Sequence[Video]] = None,
1776
+ files: Optional[Sequence[File]] = None,
1750
1777
  debug_mode: Optional[bool] = None,
1778
+ dependencies: Optional[Dict[str, Any]] = None,
1751
1779
  **kwargs: Any,
1752
1780
  ) -> TeamRunOutput:
1753
1781
  """Run the Team and return the response.
1754
1782
 
1755
1783
  Steps:
1756
- 1. Resolve dependencies
1757
- 2. Execute pre-hooks
1758
- 3. Prepare run messages
1759
- 4. Reason about the task(s) if reasoning is enabled
1760
- 5. Get a response from the model
1761
- 6. Add RunOutput to Team Session
1762
- 7. Calculate session metrics
1763
- 8. Update Team Memory
1764
- 9. Save session to storage
1784
+ 1. Read or create session
1785
+ 2. Update metadata and session state
1786
+ 3. Execute pre-hooks
1787
+ 4. Determine tools for model
1788
+ 5. Prepare run messages
1789
+ 6. Reason about the task if reasoning is enabled
1790
+ 7. Get a response from the Model (includes running function calls)
1791
+ 8. Update TeamRunOutput
1792
+ 9. Add the run to memory
1793
+ 10. Calculate session metrics
1794
+ 11. Parse team response model
1795
+ 12. Update Team Memory
1796
+ 13. Scrub the stored run if needed
1797
+ 14. Save session to storage
1798
+ 15. Execute post-hooks
1765
1799
  """
1766
- # 1. Resolve callable dependencies if present
1767
- if dependencies is not None:
1768
- await self._aresolve_run_dependencies(dependencies=dependencies)
1800
+ log_debug(f"Team Run Start: {run_response.run_id}", center=True)
1801
+
1802
+ register_run(run_response.run_id) # type: ignore
1803
+
1804
+ # 1. Read or create session. Reads from the database if provided.
1805
+ if self._has_async_db():
1806
+ team_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)
1807
+ else:
1808
+ team_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
1809
+
1810
+ # 2. Update metadata and session state
1811
+ self._update_metadata(session=team_session)
1812
+ session_state = self._load_session_state(session=team_session, session_state=session_state) # type: ignore
1813
+
1814
+ if store_member_responses is None:
1815
+ store_member_responses = False if self.store_member_responses is None else self.store_member_responses
1769
1816
 
1770
1817
  run_input = cast(TeamRunInput, run_response.input)
1771
- self.model = cast(Model, self.model)
1772
- # 2. Execute pre-hooks after session is loaded but before processing starts
1818
+
1819
+ # 3. Execute pre-hooks after session is loaded but before processing starts
1773
1820
  if self.pre_hooks is not None:
1774
1821
  pre_hook_iterator = self._aexecute_pre_hooks(
1775
1822
  hooks=self.pre_hooks, # type: ignore
1776
1823
  run_response=run_response,
1777
1824
  run_input=run_input,
1778
- session=session,
1825
+ session=team_session,
1779
1826
  user_id=user_id,
1780
1827
  debug_mode=debug_mode,
1781
1828
  **kwargs,
@@ -1785,14 +1832,14 @@ class Team:
1785
1832
  async for _ in pre_hook_iterator:
1786
1833
  pass
1787
1834
 
1788
- # Initialize the team run context
1835
+ # 4. Determine tools for model
1789
1836
  team_run_context: Dict[str, Any] = {}
1790
-
1837
+ self.model = cast(Model, self.model)
1791
1838
  self.determine_tools_for_model(
1792
1839
  model=self.model,
1793
1840
  run_response=run_response,
1794
1841
  team_run_context=team_run_context,
1795
- session=session,
1842
+ session=team_session,
1796
1843
  session_state=session_state,
1797
1844
  user_id=user_id,
1798
1845
  async_mode=True,
@@ -1810,39 +1857,36 @@ class Team:
1810
1857
  metadata=metadata,
1811
1858
  )
1812
1859
 
1813
- # 3. Prepare run messages
1814
- run_messages = self._get_run_messages(
1860
+ # 5. Prepare run messages
1861
+ run_messages = await self._aget_run_messages(
1815
1862
  run_response=run_response,
1816
- session=session,
1863
+ session=team_session, # type: ignore
1817
1864
  session_state=session_state,
1818
1865
  user_id=user_id,
1819
- input_message=run_input.input_content,
1820
- audio=run_input.audios,
1821
- images=run_input.images,
1822
- videos=run_input.videos,
1823
- files=run_input.files,
1866
+ input_message=input,
1867
+ audio=audio,
1868
+ images=images,
1869
+ videos=videos,
1870
+ files=files,
1824
1871
  knowledge_filters=knowledge_filters,
1825
1872
  add_history_to_context=add_history_to_context,
1826
1873
  dependencies=dependencies,
1827
1874
  add_dependencies_to_context=add_dependencies_to_context,
1828
1875
  add_session_state_to_context=add_session_state_to_context,
1829
- metadata=metadata,
1830
1876
  **kwargs,
1831
1877
  )
1832
1878
 
1833
- self.model = cast(Model, self.model)
1834
- log_debug(f"Team Run Start: {run_response.run_id}", center=True)
1835
-
1836
1879
  # Register run for cancellation tracking
1837
1880
  register_run(run_response.run_id) # type: ignore
1838
1881
 
1839
- # 4. Reason about the task(s) if reasoning is enabled
1882
+ # 6. Reason about the task(s) if reasoning is enabled
1840
1883
  await self._ahandle_reasoning(run_response=run_response, run_messages=run_messages)
1841
1884
 
1842
1885
  # Check for cancellation before model call
1843
1886
  raise_if_cancelled(run_response.run_id) # type: ignore
1844
1887
 
1845
- # 5. Get the model response for the team leader
1888
+ # 7. Get the model response for the team leader
1889
+ self.model = cast(Model, self.model)
1846
1890
  model_response = await self.model.aresponse(
1847
1891
  messages=run_messages.messages,
1848
1892
  tools=self._tools_for_model,
@@ -1852,60 +1896,65 @@ class Team:
1852
1896
  response_format=response_format,
1853
1897
  send_media_to_model=self.send_media_to_model,
1854
1898
  ) # type: ignore
1855
-
1856
- # Check for cancellation after model call
1857
1899
  raise_if_cancelled(run_response.run_id) # type: ignore
1858
1900
 
1859
1901
  # If an output model is provided, generate output using the output model
1860
1902
  await self._agenerate_response_with_output_model(model_response=model_response, run_messages=run_messages)
1861
-
1862
1903
  # If a parser model is provided, structure the response separately
1863
1904
  await self._aparse_response_with_parser_model(model_response=model_response, run_messages=run_messages)
1864
1905
 
1865
- # Update TeamRunOutput
1906
+ # 8. Update TeamRunOutput
1866
1907
  self._update_run_response(model_response=model_response, run_response=run_response, run_messages=run_messages)
1867
1908
 
1909
+ # Optional: Store media
1868
1910
  if self.store_media:
1869
1911
  self._store_media(run_response, model_response)
1870
1912
  else:
1871
1913
  self._scrub_media_from_run_output(run_response)
1872
1914
 
1915
+ # 9. Add the run to memory
1916
+ team_session.upsert_run(run_response=run_response)
1917
+
1918
+ # 10. Calculate session metrics
1919
+ self._update_session_metrics(session=team_session)
1920
+
1873
1921
  run_response.status = RunStatus.completed
1874
1922
 
1875
- # Parse team response model
1923
+ # 11. Parse team response model
1876
1924
  self._convert_response_to_structured_format(run_response=run_response)
1877
1925
 
1878
1926
  # Set the run duration
1879
1927
  if run_response.metrics:
1880
1928
  run_response.metrics.stop_timer()
1881
1929
 
1882
- # 6. Add the run to session
1883
- session.upsert_run(run_response=run_response)
1884
-
1885
- # 6. Update Team Memory
1930
+ # 12. Update Team Memory
1886
1931
  async for _ in self._amake_memories_and_summaries(
1887
1932
  run_response=run_response,
1888
- session=session,
1933
+ session=team_session,
1889
1934
  run_messages=run_messages,
1890
1935
  user_id=user_id,
1891
1936
  ):
1892
1937
  pass
1893
1938
 
1894
- # 7. Calculate session metrics
1895
- self._update_session_metrics(session=session)
1939
+ # 13. Scrub the stored run based on storage flags
1940
+ if self._scrub_run_output_for_storage(run_response):
1941
+ team_session.upsert_run(run_response=run_response)
1896
1942
 
1897
- # 8. Save session to storage
1898
- self.save_session(session=session)
1943
+ # 14. Save session to storage
1944
+ if self._has_async_db():
1945
+ await self.asave_session(session=team_session)
1946
+ else:
1947
+ self.save_session(session=team_session)
1899
1948
 
1900
- # Log Team Telemetry
1901
- await self._alog_team_telemetry(session_id=session.session_id, run_id=run_response.run_id)
1949
+ # Log Team Telemetry
1950
+ await self._alog_team_telemetry(session_id=team_session.session_id, run_id=run_response.run_id)
1902
1951
 
1903
- # Execute post-hooks after output is generated but before response is returned
1952
+ # 15. Execute post-hooks after output is generated but before response is returned
1904
1953
  if self.post_hooks is not None:
1905
1954
  await self._aexecute_post_hooks(
1906
1955
  hooks=self.post_hooks, # type: ignore
1907
1956
  run_output=run_response,
1908
- session=session,
1957
+ session=team_session,
1909
1958
  user_id=user_id,
1910
1959
  debug_mode=debug_mode,
1911
1960
  **kwargs,
@@ -1913,48 +1962,68 @@ class Team:
1913
1962
 
1914
1963
  log_debug(f"Team Run End: {run_response.run_id}", center=True, symbol="*")
1915
1964
 
1916
- # Always clean up the run tracking
1917
1965
  cleanup_run(run_response.run_id) # type: ignore
1918
1966
 
1919
1967
  return run_response
1920
1968
 
1921
1969
  async def _arun_stream(
1922
1970
  self,
1971
+ input: Union[str, List, Dict, Message, BaseModel],
1923
1972
  run_response: TeamRunOutput,
1924
- session: TeamSession,
1925
- session_state: Dict[str, Any],
1973
+ session_id: str,
1974
+ session_state: Optional[Dict[str, Any]] = None,
1926
1975
  user_id: Optional[str] = None,
1927
- knowledge_filters: Optional[Dict[str, Any]] = None,
1928
- add_history_to_context: Optional[bool] = None,
1929
- add_dependencies_to_context: Optional[bool] = None,
1930
- add_session_state_to_context: Optional[bool] = None,
1931
- metadata: Optional[Dict[str, Any]] = None,
1932
1976
  response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
1933
- dependencies: Optional[Dict[str, Any]] = None,
1934
1977
  stream_intermediate_steps: bool = False,
1935
1978
  yield_run_response: bool = False,
1979
+ add_dependencies_to_context: Optional[bool] = None,
1980
+ add_session_state_to_context: Optional[bool] = None,
1981
+ add_history_to_context: Optional[bool] = None,
1982
+ knowledge_filters: Optional[Dict[str, Any]] = None,
1983
+ metadata: Optional[Dict[str, Any]] = None,
1984
+ audio: Optional[Sequence[Audio]] = None,
1985
+ images: Optional[Sequence[Image]] = None,
1986
+ videos: Optional[Sequence[Video]] = None,
1987
+ files: Optional[Sequence[File]] = None,
1936
1988
  debug_mode: Optional[bool] = None,
1989
+ dependencies: Optional[Dict[str, Any]] = None,
1937
1990
  **kwargs: Any,
1938
1991
  ) -> AsyncIterator[Union[TeamRunOutputEvent, RunOutputEvent, TeamRunOutput]]:
1939
1992
  """Run the Team and return the response.
1940
1993
 
1941
1994
  Steps:
1942
1995
  1. Resolve dependencies
1943
- 2. Prepare run messages
1944
- 3. Reason about the task(s) if reasoning is enabled
1945
- 4. Get a response from the model
1946
- 5. Add the run to Team Session
1947
- 6. Update Team Memory
1948
- 7. Create the run completed event
1949
- 8. Calculate session metrics
1950
- 9. Save session to storage
1996
+ 2. Read or create session
1997
+ 3. Update metadata and session state
1998
+ 4. Execute pre-hooks
1999
+ 5. Determine tools for model
2000
+ 6. Prepare run messages
2001
+ 7. Yield the run started event
2002
+ 8. Reason about the task(s) if reasoning is enabled
2003
+ 9. Get a response from the model
2004
+ 10. Add the run to memory
2005
+ 11. Update Team Memory
2006
+ 12. Calculate session metrics
2007
+ 13. Create the run completed event
2008
+ 14. Scrub the stored run if needed
2009
+ 15. Save session to storage
1951
2010
  """
1952
2011
 
1953
- # 1. Resolve callable dependencies if present
2012
+ # 1. Resolve dependencies
1954
2013
  if dependencies is not None:
1955
- await self._aresolve_run_dependencies(dependencies=dependencies)
2014
+ self._resolve_run_dependencies(dependencies=dependencies)
2015
+
2016
+ # 2. Read or create session. Reads from the database if provided.
2017
+ if self._has_async_db():
2018
+ team_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)
2019
+ else:
2020
+ team_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
2021
+
2022
+ # 3. Update metadata and session state
2023
+ self._update_metadata(session=team_session)
2024
+ session_state = self._load_session_state(session=team_session, session_state=session_state) # type: ignore
1956
2025
 
1957
- # Execute pre-hooks
2026
+ # 4. Execute pre-hooks
1958
2027
  run_input = cast(TeamRunInput, run_response.input)
1959
2028
  self.model = cast(Model, self.model)
1960
2029
  if self.pre_hooks is not None:
@@ -1962,7 +2031,7 @@ class Team:
1962
2031
  hooks=self.pre_hooks, # type: ignore
1963
2032
  run_response=run_response,
1964
2033
  run_input=run_input,
1965
- session=session,
2034
+ session=team_session,
1966
2035
  user_id=user_id,
1967
2036
  debug_mode=debug_mode,
1968
2037
  **kwargs,
@@ -1970,42 +2039,40 @@ class Team:
1970
2039
  async for pre_hook_event in pre_hook_iterator:
1971
2040
  yield pre_hook_event
1972
2041
 
1973
- # Initialize the team run context
2042
+ # 5. Determine tools for model
1974
2043
  team_run_context: Dict[str, Any] = {}
1975
-
2044
+ self.model = cast(Model, self.model)
1976
2045
  self.determine_tools_for_model(
1977
2046
  model=self.model,
1978
2047
  run_response=run_response,
1979
2048
  team_run_context=team_run_context,
1980
- session=session,
2049
+ session=team_session, # type: ignore
1981
2050
  session_state=session_state,
1982
2051
  user_id=user_id,
1983
2052
  async_mode=True,
1984
2053
  knowledge_filters=knowledge_filters,
1985
- input_message=run_input.input_content,
1986
- images=run_input.images,
1987
- videos=run_input.videos,
1988
- audio=run_input.audios,
1989
- files=run_input.files,
2054
+ input_message=input,
2055
+ images=images,
2056
+ videos=videos,
2057
+ audio=audio,
2058
+ files=files,
1990
2059
  debug_mode=debug_mode,
1991
2060
  add_history_to_context=add_history_to_context,
1992
- add_dependencies_to_context=add_dependencies_to_context,
1993
- add_session_state_to_context=add_session_state_to_context,
1994
2061
  dependencies=dependencies,
1995
2062
  metadata=metadata,
1996
2063
  )
1997
2064
 
1998
- # 2. Prepare run messages
1999
- run_messages = self._get_run_messages(
2065
+ # 6. Prepare run messages
2066
+ run_messages = await self._aget_run_messages(
2000
2067
  run_response=run_response,
2001
- session=session,
2068
+ session=team_session, # type: ignore
2002
2069
  session_state=session_state,
2003
2070
  user_id=user_id,
2004
- input_message=run_input.input_content,
2005
- audio=run_input.audios,
2006
- images=run_input.images,
2007
- videos=run_input.videos,
2008
- files=run_input.files,
2071
+ input_message=input,
2072
+ audio=audio,
2073
+ images=images,
2074
+ videos=videos,
2075
+ files=files,
2009
2076
  knowledge_filters=knowledge_filters,
2010
2077
  add_history_to_context=add_history_to_context,
2011
2078
  dependencies=dependencies,
@@ -2021,13 +2088,11 @@ class Team:
2021
2088
  register_run(run_response.run_id) # type: ignore
2022
2089
 
2023
2090
  try:
2024
- # Start the Run by yielding a RunStarted event
2091
+ # 7. Yield the run started event
2025
2092
  if stream_intermediate_steps:
2026
- yield self._handle_event(
2027
- create_team_run_started_event(from_run_response=run_response), run_response
2028
- )
2093
+ yield self._handle_event(create_team_run_started_event(from_run_response=run_response), run_response)
2029
2094
 
2030
- # 3. Reason about the task(s) if reasoning is enabled
2095
+ # 8. Reason about the task(s) if reasoning is enabled
2031
2096
  async for item in self._ahandle_reasoning_stream(run_response=run_response, run_messages=run_messages):
2032
2097
  raise_if_cancelled(run_response.run_id) # type: ignore
2033
2098
  yield item
@@ -2035,10 +2100,10 @@ class Team:
2035
2100
  # Check for cancellation before model processing
2036
2101
  raise_if_cancelled(run_response.run_id) # type: ignore
2037
2102
 
2038
- # 4. Get a response from the model
2103
+ # 9. Get a response from the model
2039
2104
  if self.output_model is None:
2040
2105
  async for event in self._ahandle_model_response_stream(
2041
- session=session,
2106
+ session=team_session,
2042
2107
  run_response=run_response,
2043
2108
  run_messages=run_messages,
2044
2109
  response_format=response_format,
@@ -2048,7 +2113,7 @@ class Team:
2048
2113
  yield event
2049
2114
  else:
2050
2115
  async for event in self._ahandle_model_response_stream(
2051
- session=session,
2116
+ session=team_session,
2052
2117
  run_response=run_response,
2053
2118
  run_messages=run_messages,
2054
2119
  response_format=response_format,
@@ -2067,7 +2132,7 @@ class Team:
2067
2132
  yield event
2068
2133
 
2069
2134
  async for event in self._agenerate_response_with_output_model_stream(
2070
- session=session,
2135
+ session=team_session,
2071
2136
  run_response=run_response,
2072
2137
  run_messages=run_messages,
2073
2138
  stream_intermediate_steps=stream_intermediate_steps,
@@ -2080,38 +2145,41 @@ class Team:
2080
2145
 
2081
2146
  # If a parser model is provided, structure the response separately
2082
2147
  async for event in self._aparse_response_with_parser_model_stream(
2083
- session=session, run_response=run_response, stream_intermediate_steps=stream_intermediate_steps
2148
+ session=team_session, run_response=run_response, stream_intermediate_steps=stream_intermediate_steps
2084
2149
  ):
2085
2150
  yield event
2086
2151
 
2087
2152
  run_response.status = RunStatus.completed
2088
2153
 
2089
- # Set the run duration
2090
- if run_response.metrics:
2091
- run_response.metrics.stop_timer()
2092
-
2093
- # 5. Add the run to Team Session
2094
- session.upsert_run(run_response=run_response)
2154
+ # 10. Add the run to memory
2155
+ team_session.upsert_run(run_response=run_response)
2095
2156
 
2096
- # 6. Update Team Memory
2157
+ # 11. Update Team Memory
2097
2158
  async for event in self._amake_memories_and_summaries(
2098
2159
  run_response=run_response,
2099
- session=session,
2160
+ session=team_session,
2100
2161
  run_messages=run_messages,
2101
2162
  user_id=user_id,
2102
2163
  ):
2103
2164
  yield event
2104
2165
 
2105
- # 7. Create the run completed event
2166
+ # 12. Calculate session metrics
2167
+ self._update_session_metrics(session=team_session)
2168
+
2169
+ # 13. Create the run completed event
2106
2170
  completed_event = self._handle_event(
2107
2171
  create_team_run_completed_event(from_run_response=run_response), run_response
2108
2172
  )
2109
2173
 
2110
- # 8. Calculate session metrics
2111
- self._update_session_metrics(session=session)
2174
+ # 14. Scrub the stored run based on storage flags
2175
+ if self._scrub_run_output_for_storage(run_response):
2176
+ team_session.upsert_run(run_response=run_response)
2112
2177
 
2113
- # 9. Save session to storage
2114
- self.save_session(session=session)
2178
+ # 15. Save the session to storage
2179
+ if self._has_async_db():
2180
+ await self.asave_session(session=team_session)
2181
+ else:
2182
+ self.save_session(session=team_session)
2115
2183
 
2116
2184
  if stream_intermediate_steps:
2117
2185
  yield completed_event
@@ -2120,7 +2188,7 @@ class Team:
2120
2188
  yield run_response
2121
2189
 
2122
2190
  # Log Team Telemetry
2123
- await self._alog_team_telemetry(session_id=session.session_id, run_id=run_response.run_id)
2191
+ await self._alog_team_telemetry(session_id=team_session.session_id, run_id=run_response.run_id)
2124
2192
 
2125
2193
  log_debug(f"Team Run End: {run_response.run_id}", center=True, symbol="*")
2126
2194
 
@@ -2137,8 +2205,11 @@ class Team:
2137
2205
  )
2138
2206
 
2139
2207
  # Add the RunOutput to Team Session even when cancelled
2140
- session.upsert_run(run_response=run_response)
2141
- self.save_session(session=session)
2208
+ team_session.upsert_run(run_response=run_response)
2209
+ if self._has_async_db():
2210
+ await self.asave_session(session=team_session)
2211
+ else:
2212
+ self.save_session(session=team_session)
2142
2213
  finally:
2143
2214
  # Always clean up the run tracking
2144
2215
  cleanup_run(run_response.run_id) # type: ignore
@@ -2245,25 +2316,8 @@ class Team:
2245
2316
  images=images, videos=videos, audios=audio, files=files
2246
2317
  )
2247
2318
 
2248
- # Create RunInput to capture the original user input
2249
- run_input = TeamRunInput(
2250
- input_content=validated_input,
2251
- images=image_artifacts,
2252
- videos=video_artifacts,
2253
- audios=audio_artifacts,
2254
- files=file_artifacts,
2255
- )
2256
-
2257
- team_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
2258
- self._update_metadata(session=team_session)
2259
-
2260
- # Update session state from DB
2261
- session_state = self._load_session_state(session=team_session, session_state=session_state)
2262
-
2263
- # Determine run dependencies (runtime override takes priority)
2319
+ # Resolve variables
2264
2320
  run_dependencies = dependencies if dependencies is not None else self.dependencies
2265
-
2266
- # Determine runtime context parameters
2267
2321
  add_dependencies = (
2268
2322
  add_dependencies_to_context if add_dependencies_to_context is not None else self.add_dependencies_to_context
2269
2323
  )
@@ -2274,10 +2328,14 @@ class Team:
2274
2328
  )
2275
2329
  add_history = add_history_to_context if add_history_to_context is not None else self.add_history_to_context
2276
2330
 
2277
- effective_filters = knowledge_filters
2278
- # When filters are passed manually
2279
- if self.knowledge_filters or knowledge_filters:
2280
- effective_filters = self._get_effective_filters(knowledge_filters)
2331
+ # Create RunInput to capture the original user input
2332
+ run_input = TeamRunInput(
2333
+ input_content=validated_input,
2334
+ images=image_artifacts,
2335
+ videos=video_artifacts,
2336
+ audios=audio_artifacts,
2337
+ files=files,
2338
+ )
2281
2339
 
2282
2340
  # Use stream override value when necessary
2283
2341
  if stream is None:
@@ -2308,6 +2366,11 @@ class Team:
2308
2366
  else:
2309
2367
  metadata = self.metadata
2310
2368
 
2369
+ # Get knowledge filters
2370
+ effective_filters = knowledge_filters
2371
+ if self.knowledge_filters or knowledge_filters:
2372
+ effective_filters = self._get_effective_filters(knowledge_filters)
2373
+
2311
2374
  # Create a new run_response for this attempt
2312
2375
  run_response = TeamRunOutput(
2313
2376
  run_id=run_id,
@@ -2337,8 +2400,9 @@ class Team:
2337
2400
  try:
2338
2401
  if stream:
2339
2402
  response_iterator = self._arun_stream(
2403
+ input=validated_input,
2340
2404
  run_response=run_response,
2341
- session=team_session, # type: ignore
2405
+ session_id=session_id,
2342
2406
  session_state=session_state,
2343
2407
  user_id=user_id,
2344
2408
  knowledge_filters=effective_filters,
@@ -2356,18 +2420,23 @@ class Team:
2356
2420
  return response_iterator # type: ignore
2357
2421
  else:
2358
2422
  return self._arun( # type: ignore
2423
+ input=validated_input,
2359
2424
  run_response=run_response,
2360
- session=team_session, # type: ignore
2361
- user_id=user_id,
2425
+ session_id=session_id,
2362
2426
  session_state=session_state,
2427
+ user_id=user_id,
2428
+ audio=audio,
2429
+ images=images,
2430
+ videos=videos,
2431
+ files=files,
2363
2432
  knowledge_filters=effective_filters,
2364
2433
  add_history_to_context=add_history,
2365
2434
  add_dependencies_to_context=add_dependencies,
2366
2435
  add_session_state_to_context=add_session_state,
2367
2436
  metadata=metadata,
2368
2437
  response_format=response_format,
2369
- dependencies=run_dependencies,
2370
2438
  debug_mode=debug_mode,
2439
+ dependencies=run_dependencies,
2371
2440
  **kwargs,
2372
2441
  )
2373
2442
 
@@ -2385,17 +2454,6 @@ class Team:
2385
2454
  import time
2386
2455
 
2387
2456
  time.sleep(delay)
2388
- except RunCancelledException as e:
2389
- # Handle run cancellation
2390
- log_info(f"Team run {run_response.run_id} was cancelled")
2391
- run_response.content = str(e)
2392
- run_response.status = RunStatus.cancelled
2393
-
2394
- # Add the RunOutput to Team Session even when cancelled
2395
- team_session.upsert_run(run_response=run_response)
2396
- self.save_session(session=team_session)
2397
-
2398
- return run_response
2399
2457
  except KeyboardInterrupt:
2400
2458
  run_response.content = "Operation cancelled by user"
2401
2459
  run_response.status = RunStatus.cancelled
@@ -3025,7 +3083,9 @@ class Team:
  run_messages.user_message.get_content_string() if run_messages.user_message is not None else None
  )
  if user_message_str is not None and self.memory_manager is not None and not self.enable_agentic_memory:
- tasks.append(self.memory_manager.acreate_user_memories(message=user_message_str, user_id=user_id))
+ tasks.append(
+ self.memory_manager.acreate_user_memories(message=user_message_str, user_id=user_id, team_id=self.id)
+ )

  if self.session_summary_manager is not None:
  tasks.append(self.session_summary_manager.acreate_session_summary(session=session))
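
User memories created during a team run are now attributed to the team via team_id. A small illustration of the call the team leader makes on its MemoryManager, mirroring the new signature above (the surrounding objects are passed in here because their setup is not shown in this diff):

async def record_preference(memory_manager, team) -> None:
    # The memory hook now passes team_id so the memory is attributed to the team.
    await memory_manager.acreate_user_memories(
        message="I prefer metric units",
        user_id="user_123",
        team_id=team.id,
    )
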
@@ -3395,6 +3455,11 @@ class Team:
  tags_to_include_in_markdown: Optional[Set[str]] = None,
  **kwargs: Any,
  ) -> None:
+ if self._has_async_db():
+ raise Exception(
+ "This method is not supported with an async DB. Please use the async version of this method."
+ )
+
  if not tags_to_include_in_markdown:
  tags_to_include_in_markdown = {"think", "thinking"}

@@ -3613,6 +3678,53 @@ class Team:
  message.image_output = None
  message.video_output = None

+ def _scrub_tool_results_from_run_output(self, run_response: TeamRunOutput) -> None:
+ """
+ Remove all tool-related data from TeamRunOutput when store_tool_results=False.
+ This includes tool calls, tool results, and tool-related message fields.
+ """
+ # Remove tool results (messages with role="tool")
+ if run_response.messages:
+ run_response.messages = [msg for msg in run_response.messages if msg.role != "tool"]
+ # Also scrub tool-related fields from remaining messages
+ for message in run_response.messages:
+ self._scrub_tool_data_from_message(message)
+
+ def _scrub_tool_data_from_message(self, message: Message) -> None:
+ """Remove tool-related data from a Message object."""
+ message.tool_calls = None
+ message.tool_call_id = None
+
+ def _scrub_history_messages_from_run_output(self, run_response: TeamRunOutput) -> None:
+ """
+ Remove all history messages from TeamRunOutput when store_history_messages=False.
+ This removes messages that were loaded from the team's memory.
+ """
+ # Remove messages with from_history=True
+ if run_response.messages:
+ run_response.messages = [msg for msg in run_response.messages if not msg.from_history]
+
+ def _scrub_run_output_for_storage(self, run_response: TeamRunOutput) -> bool:
+ """
+ Scrub run output based on storage flags before persisting to database.
+ Returns True if any scrubbing was done, False otherwise.
+ """
+ scrubbed = False
+
+ if not self.store_media:
+ self._scrub_media_from_run_output(run_response)
+ scrubbed = True
+
+ if not self.store_tool_results:
+ self._scrub_tool_results_from_run_output(run_response)
+ scrubbed = True
+
+ if not self.store_history_messages:
+ self._scrub_history_messages_from_run_output(run_response)
+ scrubbed = True
+
+ return scrubbed
+
  def _validate_media_object_id(
  self,
  images: Optional[Sequence[Image]] = None,
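
In effect, the stored copy of a run keeps only the messages that survive these filters: tool messages and replayed history are dropped, while the live user/assistant exchange is kept. A tiny standalone illustration of the same filtering logic, using plain dicts in place of agno Message objects:

# Stand-ins mirroring Message.role and Message.from_history
messages = [
    {"role": "user", "content": "What is 2 + 2?", "from_history": False},
    {"role": "assistant", "content": "Let me work that out.", "from_history": False},
    {"role": "tool", "content": "4", "from_history": False},
    {"role": "assistant", "content": "An earlier answer.", "from_history": True},
]

# store_tool_results=False drops role == "tool"; store_history_messages=False drops from_history.
stored = [m for m in messages if m["role"] != "tool" and not m["from_history"]]
assert [m["role"] for m in stored] == ["user", "assistant"]
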
@@ -3856,12 +3968,15 @@ class Team:

  # If a reasoning model is provided, use it to generate reasoning
  if reasoning_model_provided:
+ from agno.reasoning.anthropic import is_anthropic_reasoning_model
  from agno.reasoning.azure_ai_foundry import is_ai_foundry_reasoning_model
  from agno.reasoning.deepseek import is_deepseek_reasoning_model
+ from agno.reasoning.gemini import is_gemini_reasoning_model
  from agno.reasoning.groq import is_groq_reasoning_model
  from agno.reasoning.helpers import get_reasoning_agent
  from agno.reasoning.ollama import is_ollama_reasoning_model
  from agno.reasoning.openai import is_openai_reasoning_model
+ from agno.reasoning.vertexai import is_vertexai_reasoning_model

  reasoning_agent = self.reasoning_agent or get_reasoning_agent(
  reasoning_model=reasoning_model,
@@ -3874,8 +3989,20 @@ class Team:
  is_openai = is_openai_reasoning_model(reasoning_model)
  is_ollama = is_ollama_reasoning_model(reasoning_model)
  is_ai_foundry = is_ai_foundry_reasoning_model(reasoning_model)
+ is_gemini = is_gemini_reasoning_model(reasoning_model)
+ is_anthropic = is_anthropic_reasoning_model(reasoning_model)
+ is_vertexai = is_vertexai_reasoning_model(reasoning_model)

- if is_deepseek or is_groq or is_openai or is_ollama or is_ai_foundry:
+ if (
+ is_deepseek
+ or is_groq
+ or is_openai
+ or is_ollama
+ or is_ai_foundry
+ or is_gemini
+ or is_anthropic
+ or is_vertexai
+ ):
  reasoning_message: Optional[Message] = None
  if is_deepseek:
  from agno.reasoning.deepseek import get_deepseek_reasoning
@@ -3912,6 +4039,27 @@ class Team:
  reasoning_message = get_ai_foundry_reasoning(
  reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
  )
+ elif is_gemini:
+ from agno.reasoning.gemini import get_gemini_reasoning
+
+ log_debug("Starting Gemini Reasoning", center=True, symbol="=")
+ reasoning_message = get_gemini_reasoning(
+ reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
+ )
+ elif is_anthropic:
+ from agno.reasoning.anthropic import get_anthropic_reasoning
+
+ log_debug("Starting Anthropic Claude Reasoning", center=True, symbol="=")
+ reasoning_message = get_anthropic_reasoning(
+ reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
+ )
+ elif is_vertexai:
+ from agno.reasoning.vertexai import get_vertexai_reasoning
+
+ log_debug("Starting VertexAI Reasoning", center=True, symbol="=")
+ reasoning_message = get_vertexai_reasoning(
+ reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
+ )

  if reasoning_message is None:
  log_warning("Reasoning error. Reasoning response is None, continuing regular session...")
@@ -4090,12 +4238,15 @@ class Team:

  # If a reasoning model is provided, use it to generate reasoning
  if reasoning_model_provided:
+ from agno.reasoning.anthropic import is_anthropic_reasoning_model
  from agno.reasoning.azure_ai_foundry import is_ai_foundry_reasoning_model
  from agno.reasoning.deepseek import is_deepseek_reasoning_model
+ from agno.reasoning.gemini import is_gemini_reasoning_model
  from agno.reasoning.groq import is_groq_reasoning_model
  from agno.reasoning.helpers import get_reasoning_agent
  from agno.reasoning.ollama import is_ollama_reasoning_model
  from agno.reasoning.openai import is_openai_reasoning_model
+ from agno.reasoning.vertexai import is_vertexai_reasoning_model

  reasoning_agent = self.reasoning_agent or get_reasoning_agent(
  reasoning_model=reasoning_model,
@@ -4108,8 +4259,20 @@ class Team:
  is_openai = is_openai_reasoning_model(reasoning_model)
  is_ollama = is_ollama_reasoning_model(reasoning_model)
  is_ai_foundry = is_ai_foundry_reasoning_model(reasoning_model)
+ is_gemini = is_gemini_reasoning_model(reasoning_model)
+ is_anthropic = is_anthropic_reasoning_model(reasoning_model)
+ is_vertexai = is_vertexai_reasoning_model(reasoning_model)

- if is_deepseek or is_groq or is_openai or is_ollama or is_ai_foundry:
+ if (
+ is_deepseek
+ or is_groq
+ or is_openai
+ or is_ollama
+ or is_ai_foundry
+ or is_gemini
+ or is_anthropic
+ or is_vertexai
+ ):
  reasoning_message: Optional[Message] = None
  if is_deepseek:
  from agno.reasoning.deepseek import aget_deepseek_reasoning
@@ -4146,6 +4309,27 @@ class Team:
  reasoning_message = get_ai_foundry_reasoning(
  reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
  )
+ elif is_gemini:
+ from agno.reasoning.gemini import aget_gemini_reasoning
+
+ log_debug("Starting Gemini Reasoning", center=True, symbol="=")
+ reasoning_message = await aget_gemini_reasoning(
+ reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
+ )
+ elif is_anthropic:
+ from agno.reasoning.anthropic import aget_anthropic_reasoning
+
+ log_debug("Starting Anthropic Claude Reasoning", center=True, symbol="=")
+ reasoning_message = await aget_anthropic_reasoning(
+ reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
+ )
+ elif is_vertexai:
+ from agno.reasoning.vertexai import aget_vertexai_reasoning
+
+ log_debug("Starting VertexAI Reasoning", center=True, symbol="=")
+ reasoning_message = await aget_vertexai_reasoning(
+ reasoning_agent=reasoning_agent, messages=run_messages.get_input_messages()
+ )

  if reasoning_message is None:
  log_warning("Reasoning error. Reasoning response is None, continuing regular session...")
@@ -4715,7 +4899,7 @@ class Team:
  system_message_content += f"{indent * ' '} - Name: {member.name}\n"
  if member.role is not None:
  system_message_content += f"{indent * ' '} - Role: {member.role}\n"
- if member.tools and self.add_member_tools_to_context:
+ if member.tools is not None and member.tools != [] and self.add_member_tools_to_context:
  system_message_content += f"{indent * ' '} - Member tools:\n"
  for _tool in member.tools:
  if isinstance(_tool, Toolkit):
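
Together with the new agno/reasoning/anthropic.py, gemini.py and vertexai.py modules above, a Claude, Gemini or Vertex AI model can now drive the dedicated reasoning step for a team, alongside the existing DeepSeek/Groq/OpenAI/Ollama/Azure AI Foundry support. A minimal sketch, assuming Team accepts a reasoning_model argument the same way Agent does and that the model id string below is valid (both assumptions):

from agno.models.anthropic import Claude
from agno.models.openai import OpenAIChat
from agno.team import Team

# The reasoning model produces the reasoning trace; the main model writes the final answer.
team = Team(
    members=[],
    model=OpenAIChat(id="gpt-4o"),
    reasoning_model=Claude(id="claude-sonnet-4-0"),
)
team.print_response("Plan a three-step rollout for the new storage flags", stream=True)
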
@@ -5025,17 +5209,318 @@ class Team:
5025
5209
 
5026
5210
  return Message(role=self.system_message_role, content=system_message_content.strip())
5027
5211
 
5028
- def _get_formatted_session_state_for_system_message(self, session_state: Dict[str, Any]) -> str:
5029
- return f"\n<session_state>\n{session_state}\n</session_state>\n\n"
5030
-
5031
- def _get_run_messages(
5212
+ async def aget_system_message(
5032
5213
  self,
5033
- *,
5034
- run_response: TeamRunOutput,
5035
5214
  session: TeamSession,
5036
5215
  session_state: Optional[Dict[str, Any]] = None,
5037
5216
  user_id: Optional[str] = None,
5038
- input_message: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None,
5217
+ audio: Optional[Sequence[Audio]] = None,
5218
+ images: Optional[Sequence[Image]] = None,
5219
+ videos: Optional[Sequence[Video]] = None,
5220
+ files: Optional[Sequence[File]] = None,
5221
+ dependencies: Optional[Dict[str, Any]] = None,
5222
+ metadata: Optional[Dict[str, Any]] = None,
5223
+ add_session_state_to_context: Optional[bool] = None,
5224
+ ) -> Optional[Message]:
5225
+ """Get the system message for the team."""
5226
+
5227
+ # 1. If the system_message is provided, use that.
5228
+ if self.system_message is not None:
5229
+ if isinstance(self.system_message, Message):
5230
+ return self.system_message
5231
+
5232
+ sys_message_content: str = ""
5233
+ if isinstance(self.system_message, str):
5234
+ sys_message_content = self.system_message
5235
+ elif callable(self.system_message):
5236
+ sys_message_content = self.system_message(agent=self)
5237
+ if not isinstance(sys_message_content, str):
5238
+ raise Exception("system_message must return a string")
5239
+
5240
+ # Format the system message with the session state variables
5241
+ if self.resolve_in_context:
5242
+ sys_message_content = self._format_message_with_state_variables(
5243
+ sys_message_content,
5244
+ user_id=user_id,
5245
+ session_state=session_state,
5246
+ dependencies=dependencies,
5247
+ metadata=metadata,
5248
+ )
5249
+
5250
+ # type: ignore
5251
+ return Message(role=self.system_message_role, content=sys_message_content)
5252
+
5253
+ # 1. Build and return the default system message for the Team.
5254
+ # 1.1 Build the list of instructions for the system message
5255
+ self.model = cast(Model, self.model)
5256
+ instructions: List[str] = []
5257
+ if self.instructions is not None:
5258
+ _instructions = self.instructions
5259
+ if callable(self.instructions):
5260
+ import inspect
5261
+
5262
+ signature = inspect.signature(self.instructions)
5263
+ if "team" in signature.parameters:
5264
+ _instructions = self.instructions(team=self)
5265
+ elif "agent" in signature.parameters:
5266
+ _instructions = self.instructions(agent=self)
5267
+ else:
5268
+ _instructions = self.instructions()
5269
+
5270
+ if isinstance(_instructions, str):
5271
+ instructions.append(_instructions)
5272
+ elif isinstance(_instructions, list):
5273
+ instructions.extend(_instructions)
5274
+
5275
+ # 1.2 Add instructions from the Model
5276
+ _model_instructions = self.model.get_instructions_for_model(self._tools_for_model)
5277
+ if _model_instructions is not None:
5278
+ instructions.extend(_model_instructions)
5279
+
5280
+ # 1.3 Build a list of additional information for the system message
5281
+ additional_information: List[str] = []
5282
+ # 1.3.1 Add instructions for using markdown
5283
+ if self.markdown and self.output_schema is None:
5284
+ additional_information.append("Use markdown to format your answers.")
5285
+ # 1.3.2 Add the current datetime
5286
+ if self.add_datetime_to_context:
5287
+ from datetime import datetime
5288
+
5289
+ tz = None
5290
+
5291
+ if self.timezone_identifier:
5292
+ try:
5293
+ from zoneinfo import ZoneInfo
5294
+
5295
+ tz = ZoneInfo(self.timezone_identifier)
5296
+ except Exception:
5297
+ log_warning("Invalid timezone identifier")
5298
+
5299
+ time = datetime.now(tz) if tz else datetime.now()
5300
+
5301
+ additional_information.append(f"The current time is {time}.")
5302
+
5303
+ # 1.3.3 Add the current location
5304
+ if self.add_location_to_context:
5305
+ from agno.utils.location import get_location
5306
+
5307
+ location = get_location()
5308
+ if location:
5309
+ location_str = ", ".join(
5310
+ filter(None, [location.get("city"), location.get("region"), location.get("country")])
5311
+ )
5312
+ if location_str:
5313
+ additional_information.append(f"Your approximate location is: {location_str}.")
5314
+
5315
+ # 1.3.4 Add team name if provided
5316
+ if self.name is not None and self.add_name_to_context:
5317
+ additional_information.append(f"Your name is: {self.name}.")
5318
+
5319
+ if self.knowledge is not None and self.enable_agentic_knowledge_filters:
5320
+ valid_filters = getattr(self.knowledge, "valid_metadata_filters", None)
5321
+ if valid_filters:
5322
+ valid_filters_str = ", ".join(valid_filters)
5323
+ additional_information.append(
5324
+ dedent(f"""
5325
+ The knowledge base contains documents with these metadata filters: {valid_filters_str}.
5326
+ Always use filters when the user query indicates specific metadata.
5327
+ Examples:
5328
+ 1. If the user asks about a specific person like "Jordan Mitchell", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like user_id>': '<valid value based on the user query>'}}.
5329
+ 2. If the user asks about a specific document type like "contracts", you MUST use the search_knowledge_base tool with the filters parameter set to {{'document_type': 'contract'}}.
5330
+ 4. If the user asks about a specific location like "documents from New York", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like location>': 'New York'}}.
5331
+ General Guidelines:
5332
+ - Always analyze the user query to identify relevant metadata.
5333
+ - Use the most specific filter(s) possible to narrow down results.
5334
+ - If multiple filters are relevant, combine them in the filters parameter (e.g., {{'name': 'Jordan Mitchell', 'document_type': 'contract'}}).
5335
+ - Ensure the filter keys match the valid metadata filters: {valid_filters_str}.
5336
+ You can use the search_knowledge_base tool to search the knowledge base and get the most relevant documents. Make sure to pass the filters as [Dict[str: Any]] to the tool. FOLLOW THIS STRUCTURE STRICTLY.
5337
+ """)
5338
+ )
5339
+
5340
+ # 2 Build the default system message for the Agent.
5341
+ system_message_content: str = ""
5342
+ system_message_content += "You are the leader of a team and sub-teams of AI Agents.\n"
5343
+ system_message_content += "Your task is to coordinate the team to complete the user's request.\n"
5344
+
5345
+ system_message_content += "\nHere are the members in your team:\n"
5346
+ system_message_content += "<team_members>\n"
5347
+ system_message_content += self.get_members_system_message_content()
5348
+ if self.get_member_information_tool:
5349
+ system_message_content += "If you need to get information about your team members, you can use the `get_member_information` tool at any time.\n"
5350
+ system_message_content += "</team_members>\n"
5351
+
5352
+ system_message_content += "\n<how_to_respond>\n"
5353
+
5354
+ if self.delegate_task_to_all_members:
5355
+ system_message_content += (
5356
+ "- Your role is to forward tasks to members in your team with the highest likelihood of completing the user's request.\n"
5357
+ "- You can either respond directly or use the `delegate_task_to_members` tool to delegate a task to all members in your team to get a collaborative response.\n"
5358
+ "- To delegate a task to all members in your team, call `delegate_task_to_members` ONLY once. This will delegate a task to all members in your team.\n"
5359
+ "- Analyze the responses from all members and evaluate whether the task has been completed.\n"
5360
+ "- If you feel the task has been completed, you can stop and respond to the user.\n"
5361
+ )
5362
+ else:
5363
+ system_message_content += (
5364
+ "- Your role is to delegate tasks to members in your team with the highest likelihood of completing the user's request.\n"
5365
+ "- Carefully analyze the tools available to the members and their roles before delegating tasks.\n"
5366
+ "- You cannot use a member tool directly. You can only delegate tasks to members.\n"
5367
+ "- When you delegate a task to another member, make sure to include:\n"
5368
+ " - member_id (str): The ID of the member to delegate the task to. Use only the ID of the member, not the ID of the team followed by the ID of the member.\n"
5369
+ " - task_description (str): A clear description of the task.\n"
5370
+ " - expected_output (str): The expected output.\n"
5371
+ "- You can delegate tasks to multiple members at once.\n"
5372
+ "- You must always analyze the responses from members before responding to the user.\n"
5373
+ "- After analyzing the responses from the members, if you feel the task has been completed, you can stop and respond to the user.\n"
5374
+ "- If you are not satisfied with the responses from the members, you should re-assign the task.\n"
5375
+ "- For simple greetings, thanks, or questions about the team itself, you should respond directly.\n"
5376
+ "- For all work requests, tasks, or questions requiring expertise, route to appropriate team members.\n"
5377
+ )
5378
+ system_message_content += "</how_to_respond>\n\n"
5379
+
5380
+ # Attached media
5381
+ if audio is not None or images is not None or videos is not None or files is not None:
5382
+ system_message_content += "<attached_media>\n"
5383
+ system_message_content += "You have the following media attached to your message:\n"
5384
+ if audio is not None and len(audio) > 0:
5385
+ system_message_content += " - Audio\n"
5386
+ if images is not None and len(images) > 0:
5387
+ system_message_content += " - Images\n"
5388
+ if videos is not None and len(videos) > 0:
5389
+ system_message_content += " - Videos\n"
5390
+ if files is not None and len(files) > 0:
5391
+ system_message_content += " - Files\n"
5392
+ system_message_content += "</attached_media>\n\n"
5393
+
5394
+ # Then add memories to the system prompt
5395
+ if self.add_memories_to_context:
5396
+ _memory_manager_not_set = False
5397
+ if not user_id:
5398
+ user_id = "default"
5399
+ if self.memory_manager is None:
5400
+ self._set_memory_manager()
5401
+ _memory_manager_not_set = True
5402
+
5403
+ if self._has_async_db():
5404
+ user_memories = await self.memory_manager.aget_user_memories(user_id=user_id) # type: ignore
5405
+ else:
5406
+ user_memories = self.memory_manager.get_user_memories(user_id=user_id) # type: ignore
5407
+
5408
+ if user_memories and len(user_memories) > 0:
5409
+ system_message_content += (
5410
+ "You have access to memories from previous interactions with the user that you can use:\n\n"
5411
+ )
5412
+ system_message_content += "<memories_from_previous_interactions>"
5413
+ for _memory in user_memories: # type: ignore
5414
+ system_message_content += f"\n- {_memory.memory}"
5415
+ system_message_content += "\n</memories_from_previous_interactions>\n\n"
5416
+ system_message_content += (
5417
+ "Note: this information is from previous interactions and may be updated in this conversation. "
5418
+ "You should always prefer information from this conversation over the past memories.\n"
5419
+ )
5420
+ else:
5421
+ system_message_content += (
5422
+ "You have the capability to retain memories from previous interactions with the user, "
5423
+ "but have not had any interactions with the user yet.\n"
5424
+ )
5425
+ if _memory_manager_not_set:
5426
+ self.memory_manager = None
5427
+
5428
+ if self.enable_agentic_memory:
5429
+ system_message_content += (
5430
+ "\n<updating_user_memories>\n"
5431
+ "- You have access to the `update_user_memory` tool that you can use to add new memories, update existing memories, delete memories, or clear all memories.\n"
5432
+ "- If the user's message includes information that should be captured as a memory, use the `update_user_memory` tool to update your memory database.\n"
5433
+ "- Memories should include details that could personalize ongoing interactions with the user.\n"
5434
+ "- Use this tool to add new memories or update existing memories that you identify in the conversation.\n"
5435
+ "- Use this tool if the user asks to update their memory, delete a memory, or clear all memories.\n"
5436
+ "- If you use the `update_user_memory` tool, remember to pass on the response to the user.\n"
5437
+ "</updating_user_memories>\n\n"
5438
+ )
5439
+
5440
+ # Then add a summary of the interaction to the system prompt
5441
+ if self.add_session_summary_to_context and session.summary is not None:
5442
+ system_message_content += "Here is a brief summary of your previous interactions:\n\n"
5443
+ system_message_content += "<summary_of_previous_interactions>\n"
5444
+ system_message_content += session.summary.summary
5445
+ system_message_content += "\n</summary_of_previous_interactions>\n\n"
5446
+ system_message_content += (
5447
+ "Note: this information is from previous interactions and may be outdated. "
5448
+ "You should ALWAYS prefer information from this conversation over the past summary.\n\n"
5449
+ )
5450
+
5451
+ if self.description is not None:
5452
+ system_message_content += f"<description>\n{self.description}\n</description>\n\n"
5453
+
5454
+ if self.role is not None:
5455
+ system_message_content += f"\n<your_role>\n{self.role}\n</your_role>\n\n"
5456
+
5457
+ # 3.3.5 Then add instructions for the Agent
5458
+ if len(instructions) > 0:
5459
+ system_message_content += "<instructions>"
5460
+ if len(instructions) > 1:
5461
+ for _upi in instructions:
5462
+ system_message_content += f"\n- {_upi}"
5463
+ else:
5464
+ system_message_content += "\n" + instructions[0]
5465
+ system_message_content += "\n</instructions>\n\n"
5466
+ # 3.3.6 Add additional information
5467
+ if len(additional_information) > 0:
5468
+ system_message_content += "<additional_information>"
5469
+ for _ai in additional_information:
5470
+ system_message_content += f"\n- {_ai}"
5471
+ system_message_content += "\n</additional_information>\n\n"
5472
+ # 3.3.7 Then add instructions for the tools
5473
+ if self._tool_instructions is not None:
5474
+ for _ti in self._tool_instructions:
5475
+ system_message_content += f"{_ti}\n"
5476
+
5477
+ # Format the system message with the session state variables
5478
+ if self.resolve_in_context:
5479
+ system_message_content = self._format_message_with_state_variables(
5480
+ system_message_content,
5481
+ user_id=user_id,
5482
+ session_state=session_state,
5483
+ dependencies=dependencies,
5484
+ metadata=metadata,
5485
+ )
5486
+
5487
+ system_message_from_model = self.model.get_system_message_for_model(self._tools_for_model)
5488
+ if system_message_from_model is not None:
5489
+ system_message_content += system_message_from_model
5490
+
5491
+ if self.expected_output is not None:
5492
+ system_message_content += f"<expected_output>\n{self.expected_output.strip()}\n</expected_output>\n\n"
5493
+
5494
+ if self.additional_context is not None:
5495
+ system_message_content += (
5496
+ f"<additional_context>\n{self.additional_context.strip()}\n</additional_context>\n\n"
5497
+ )
5498
+
5499
+ if self.add_session_state_to_context:
5500
+ system_message_content += f"<session_state>\n{session_state}\n</session_state>\n\n"
5501
+
5502
+ # Add the JSON output prompt if output_schema is provided and structured_outputs is False
5503
+ if (
5504
+ self.output_schema is not None
5505
+ and self.use_json_mode
5506
+ and self.model
5507
+ and self.model.supports_native_structured_outputs
5508
+ ):
5509
+ system_message_content += f"{self._get_json_output_prompt()}"
5510
+
5511
+ return Message(role=self.system_message_role, content=system_message_content.strip())
5512
+
5513
+ def _get_formatted_session_state_for_system_message(self, session_state: Dict[str, Any]) -> str:
5514
+ return f"\n<session_state>\n{session_state}\n</session_state>\n\n"
5515
+
5516
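The memory block assembled above has a fixed shape: an XML-style wrapper with one bullet per stored memory, followed by a recency caveat. A minimal standalone sketch of that formatting, using plain strings in place of the library's memory objects (illustrative only, not the library API):

from typing import List

def format_memories_block(memories: List[str]) -> str:
    # Mirrors the <memories_from_previous_interactions> block built in aget_system_message,
    # with plain strings standing in for agno's memory objects.
    if not memories:
        return (
            "You have the capability to retain memories from previous interactions with the user, "
            "but have not had any interactions with the user yet.\n"
        )
    block = "You have access to memories from previous interactions with the user that you can use:\n\n"
    block += "<memories_from_previous_interactions>"
    for memory in memories:
        block += f"\n- {memory}"
    block += "\n</memories_from_previous_interactions>\n\n"
    block += (
        "Note: this information is from previous interactions and may be updated in this conversation. "
        "You should always prefer information from this conversation over the past memories.\n"
    )
    return block

print(format_memories_block(["Prefers concise answers", "Works in UTC+2"]))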
+ def _get_run_messages(
+ self,
+ *,
+ run_response: TeamRunOutput,
+ session: TeamSession,
+ session_state: Optional[Dict[str, Any]] = None,
+ user_id: Optional[str] = None,
+ input_message: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None,
  audio: Optional[Sequence[Audio]] = None,
  images: Optional[Sequence[Image]] = None,
  videos: Optional[Sequence[Video]] = None,
@@ -5163,6 +5648,134 @@ class Team:

  return run_messages

+ async def _aget_run_messages(
+ self,
+ *,
+ run_response: TeamRunOutput,
+ session: TeamSession,
+ session_state: Optional[Dict[str, Any]] = None,
+ user_id: Optional[str] = None,
+ input_message: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None,
+ audio: Optional[Sequence[Audio]] = None,
+ images: Optional[Sequence[Image]] = None,
+ videos: Optional[Sequence[Video]] = None,
+ files: Optional[Sequence[File]] = None,
+ knowledge_filters: Optional[Dict[str, Any]] = None,
+ add_history_to_context: Optional[bool] = None,
+ dependencies: Optional[Dict[str, Any]] = None,
+ add_dependencies_to_context: Optional[bool] = None,
+ add_session_state_to_context: Optional[bool] = None,
+ metadata: Optional[Dict[str, Any]] = None,
+ **kwargs: Any,
+ ) -> RunMessages:
+ """This function returns a RunMessages object with the following attributes:
+ - system_message: The system message for this run
+ - user_message: The user message for this run
+ - messages: List of messages to send to the model
+
+ To build the RunMessages object:
+ 1. Add system message to run_messages
+ 2. Add extra messages to run_messages
+ 3. Add history to run_messages
+ 4. Add messages to run_messages if provided (messages parameter first)
+ 5. Add user message to run_messages (message parameter second)
+
+ """
+ # Initialize the RunMessages object
+ run_messages = RunMessages()
+
+ # 1. Add system message to run_messages
+ system_message = await self.aget_system_message(
+ session=session,
+ session_state=session_state,
+ user_id=user_id,
+ images=images,
+ audio=audio,
+ videos=videos,
+ files=files,
+ dependencies=dependencies,
+ metadata=metadata,
+ add_session_state_to_context=add_session_state_to_context,
+ )
+ if system_message is not None:
+ run_messages.system_message = system_message
+ run_messages.messages.append(system_message)
+
+ # 2. Add extra messages to run_messages if provided
+ if self.additional_input is not None:
+ messages_to_add_to_run_response: List[Message] = []
+ if run_messages.extra_messages is None:
+ run_messages.extra_messages = []
+
+ for _m in self.additional_input:
+ if isinstance(_m, Message):
+ messages_to_add_to_run_response.append(_m)
+ run_messages.messages.append(_m)
+ run_messages.extra_messages.append(_m)
+ elif isinstance(_m, dict):
+ try:
+ _m_parsed = Message.model_validate(_m)
+ messages_to_add_to_run_response.append(_m_parsed)
+ run_messages.messages.append(_m_parsed)
+ run_messages.extra_messages.append(_m_parsed)
+ except Exception as e:
+ log_warning(f"Failed to validate message: {e}")
+ # Add the extra messages to the run_response
+ if len(messages_to_add_to_run_response) > 0:
+ log_debug(f"Adding {len(messages_to_add_to_run_response)} extra messages")
+ if run_response.additional_input is None:
+ run_response.additional_input = messages_to_add_to_run_response
+ else:
+ run_response.additional_input.extend(messages_to_add_to_run_response)
+
+ # 3. Add history to run_messages
+ if add_history_to_context:
+ from copy import deepcopy
+
+ history = session.get_messages_from_last_n_runs(
+ last_n=self.num_history_runs,
+ skip_role=self.system_message_role,
+ team_id=self.id,
+ )
+
+ if len(history) > 0:
+ # Create a deep copy of the history messages to avoid modifying the original messages
+ history_copy = [deepcopy(msg) for msg in history]
+
+ # Tag each message as coming from history
+ for _msg in history_copy:
+ _msg.from_history = True
+
+ log_debug(f"Adding {len(history_copy)} messages from history")
+
+ # Extend the messages with the history
+ run_messages.messages += history_copy
+
+ # 5. Add user message to run_messages (message second as per Dirk's requirement)
+ user_message: Optional[Message] = None
+ # 5.1 Build user message if message is None, str or list
+ user_message = self._get_user_message(
+ run_response=run_response,
+ session_state=session_state,
+ input_message=input_message,
+ user_id=user_id,
+ audio=audio,
+ images=images,
+ videos=videos,
+ files=files,
+ knowledge_filters=knowledge_filters,
+ dependencies=dependencies,
+ add_dependencies_to_context=add_dependencies_to_context,
+ metadata=metadata,
+ **kwargs,
+ )
+ # Add user message to run_messages
+ if user_message is not None:
+ run_messages.user_message = user_message
+ run_messages.messages.append(user_message)
+
+ return run_messages
+
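The async variant assembles messages in the same order as its sync counterpart: system message, then `additional_input`, then tagged copies of session history, then the user message. A small sketch of the history-tagging step in isolation, with a stand-in message type rather than agno's `Message` (illustrative only):

from copy import deepcopy
from dataclasses import dataclass
from typing import List

@dataclass
class StubMessage:
    role: str
    content: str
    from_history: bool = False

def tag_history(history: List[StubMessage]) -> List[StubMessage]:
    # Deep-copy so the stored session messages are never mutated, then mark each
    # copy as history, mirroring step 3 of _aget_run_messages above.
    history_copy = [deepcopy(msg) for msg in history]
    for msg in history_copy:
        msg.from_history = True
    return history_copy

conversation: List[StubMessage] = [StubMessage("system", "You lead a team.")]
conversation += tag_history([StubMessage("user", "Hi"), StubMessage("assistant", "Hello!")])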
  def _get_user_message(
  self,
  *,
@@ -5662,6 +6275,7 @@ class Team:
  if self.db is None:
  return "Previous session messages not available"

+ self.db = cast(BaseDb, self.db)
  selected_sessions = self.db.get_sessions(
  session_type=SessionType.TEAM,
  limit=num_history_sessions,
@@ -5701,7 +6315,62 @@ class Team:

  return json.dumps([msg.to_dict() for msg in all_messages]) if all_messages else "No history found"

- return get_previous_session_messages
+ async def aget_previous_session_messages() -> str:
+ """Use this function to retrieve messages from previous chat sessions.
+ USE THIS TOOL ONLY WHEN THE QUESTION IS EITHER "What was my last conversation?" or "What was my last question?" and similar to it.
+
+ Returns:
+ str: JSON formatted list of message pairs from previous sessions
+ """
+ import json
+
+ if self.db is None:
+ return "Previous session messages not available"
+
+ self.db = cast(AsyncBaseDb, self.db)
+ selected_sessions = await self.db.get_sessions(
+ session_type=SessionType.TEAM,
+ limit=num_history_sessions,
+ user_id=user_id,
+ sort_by="created_at",
+ sort_order="desc",
+ )
+
+ all_messages = []
+ seen_message_pairs = set()
+
+ for session in selected_sessions:
+ if isinstance(session, TeamSession) and session.runs:
+ message_count = 0
+ for run in session.runs:
+ messages = run.messages
+ if messages is not None:
+ for i in range(0, len(messages) - 1, 2):
+ if i + 1 < len(messages):
+ try:
+ user_msg = messages[i]
+ assistant_msg = messages[i + 1]
+ user_content = user_msg.content
+ assistant_content = assistant_msg.content
+ if user_content is None or assistant_content is None:
+ continue # Skip this pair if either message has no content
+
+ msg_pair_id = f"{user_content}:{assistant_content}"
+ if msg_pair_id not in seen_message_pairs:
+ seen_message_pairs.add(msg_pair_id)
+ all_messages.append(Message.model_validate(user_msg))
+ all_messages.append(Message.model_validate(assistant_msg))
+ message_count += 1
+ except Exception as e:
+ log_warning(f"Error processing message pair: {e}")
+ continue
+
+ return json.dumps([msg.to_dict() for msg in all_messages]) if all_messages else "No history found"
+
+ if self._has_async_db():
+ return aget_previous_session_messages
+ else:
+ return get_previous_session_messages
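Both the sync and async history tools walk each run's messages two at a time and deduplicate (user, assistant) pairs by their concatenated content. A compact sketch of that pairing logic over plain dicts, assuming each message is a dict with a `content` key (illustrative only):

from typing import Dict, List, Tuple

def unique_message_pairs(messages: List[Dict[str, str]]) -> List[Tuple[str, str]]:
    # Pair messages as (user, assistant), skip pairs with missing content,
    # and drop duplicates, as the history tool above does.
    pairs: List[Tuple[str, str]] = []
    seen = set()
    for i in range(0, len(messages) - 1, 2):
        user_content = messages[i].get("content")
        assistant_content = messages[i + 1].get("content")
        if user_content is None or assistant_content is None:
            continue
        key = f"{user_content}:{assistant_content}"
        if key not in seen:
            seen.add(key)
            pairs.append((user_content, assistant_content))
    return pairs

pairs = unique_message_pairs([
    {"content": "What is the revenue?"},
    {"content": "Revenue was up 12%."},
    {"content": "What is the revenue?"},
    {"content": "Revenue was up 12%."},
])  # -> one deduplicated pair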
 
  def _get_history_for_member_agent(self, session: TeamSession, member_agent: Union[Agent, "Team"]) -> List[Message]:
  from copy import deepcopy
@@ -6467,6 +7136,18 @@ class Team:
  log_warning(f"Error getting session from db: {e}")
  return None

+ async def _aread_session(self, session_id: str) -> Optional[TeamSession]:
+ """Get a Session from the database."""
+ try:
+ if not self.db:
+ raise ValueError("Db not initialized")
+ self.db = cast(AsyncBaseDb, self.db)
+ session = await self.db.get_session(session_id=session_id, session_type=SessionType.TEAM)
+ return session # type: ignore
+ except Exception as e:
+ log_warning(f"Error getting session from db: {e}")
+ return None
+
  def _upsert_session(self, session: TeamSession) -> Optional[TeamSession]:
  """Upsert a Session into the database."""

@@ -6478,6 +7159,17 @@ class Team:
  log_warning(f"Error upserting session into db: {e}")
  return None

+ async def _aupsert_session(self, session: TeamSession) -> Optional[TeamSession]:
+ """Upsert a Session into the database."""
+
+ try:
+ if not self.db:
+ raise ValueError("Db not initialized")
+ return await self.db.upsert_session(session=session) # type: ignore
+ except Exception as e:
+ log_warning(f"Error upserting session into db: {e}")
+ return None
+
  def get_run_output(
  self, run_id: str, session_id: Optional[str] = None
  ) -> Optional[Union[TeamRunOutput, RunOutput]]:
@@ -6567,6 +7259,47 @@ class Team:

  return team_session

+ async def _aread_or_create_session(self, session_id: str, user_id: Optional[str] = None) -> TeamSession:
+ """Load the TeamSession from storage, creating a new one if none is found.
+
+ Returns:
+ TeamSession: The loaded TeamSession, or a newly created one if none was found.
+ """
+ from time import time
+
+ from agno.session.team import TeamSession
+
+ # Return existing session if we have one
+ if self._team_session is not None and self._team_session.session_id == session_id:
+ return self._team_session
+
+ # Try to load from database
+ team_session = None
+ if self.db is not None and self.parent_team_id is None and self.workflow_id is None:
+ if self._has_async_db():
+ team_session = cast(TeamSession, await self._aread_session(session_id=session_id))
+ else:
+ team_session = cast(TeamSession, self._read_session(session_id=session_id))
+
+ # Create new session if none found
+ if team_session is None:
+ log_debug(f"Creating new TeamSession: {session_id}")
+ team_session = TeamSession(
+ session_id=session_id,
+ team_id=self.id,
+ user_id=user_id,
+ team_data=self._get_team_data(),
+ session_data={},
+ metadata=self.metadata,
+ created_at=int(time()),
+ )
+
+ # Cache the session if relevant
+ if team_session is not None and self.cache_session:
+ self._team_session = team_session
+
+ return team_session
+
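`_aread_or_create_session` branches on `self._has_async_db()` so one call site serves both sync and async storage backends, and it falls back to constructing a fresh `TeamSession` stamped with `int(time())`. A condensed sketch of that control flow, with hypothetical `load_sync`/`load_async`/`make_default` callables standing in for the real loaders:

from typing import Awaitable, Callable, Optional, TypeVar

T = TypeVar("T")

async def read_or_create(
    has_async_db: bool,
    load_sync: Callable[[], Optional[T]],
    load_async: Callable[[], Awaitable[Optional[T]]],
    make_default: Callable[[], T],
) -> T:
    # Prefer the non-blocking loader when the backend supports it, fall back to
    # the sync loader otherwise, and build a fresh object when nothing is stored.
    found = await load_async() if has_async_db else load_sync()
    return found if found is not None else make_default()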
  def get_session(
  self,
  session_id: Optional[str] = None,
@@ -6618,6 +7351,16 @@ class Team:
  self._upsert_session(session=session)
  log_debug(f"Created or updated TeamSession record: {session.session_id}")

+ async def asave_session(self, session: TeamSession) -> None:
+ """Save the TeamSession to storage"""
+ if self.db is not None and self.parent_team_id is None and self.workflow_id is None:
+ if session.session_data is not None and "session_state" in session.session_data:
+ session.session_data["session_state"].pop("current_session_id", None) # type: ignore
+ session.session_data["session_state"].pop("current_user_id", None) # type: ignore
+ session.session_data["session_state"].pop("current_run_id", None) # type: ignore
+ await self._aupsert_session(session=session)
+ log_debug(f"Created or updated TeamSession record: {session.session_id}")
+
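Together with `_aread_or_create_session`, the new `asave_session` coroutine completes the async session round trip, stripping the run-scoped `current_*` keys from `session_state` before the upsert. A hedged usage sketch, assuming `team` is a `Team` wired to an async-capable db (an `AsyncBaseDb` implementation); all other names are illustrative:

import asyncio

async def demo(team) -> None:
    # Illustrative only: exercises the internal read-or-create path and the new
    # public asave_session coroutine shown above.
    session = await team._aread_or_create_session(session_id="demo-session", user_id="user-1")
    if session.session_data is None:
        session.session_data = {}
    session.session_data.setdefault("session_state", {})["last_topic"] = "quarterly report"
    await team.asave_session(session)  # ephemeral current_* keys are stripped before the upsert

# asyncio.run(demo(team))  # requires a concrete Team instance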
  def _load_session_state(self, session: TeamSession, session_state: Dict[str, Any]) -> Dict[str, Any]:
  """Load and return the stored session_state from the database, optionally merging it with the given one"""