agno 2.3.25__py3-none-any.whl → 2.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (128)
  1. agno/agent/__init__.py +4 -0
  2. agno/agent/agent.py +1428 -558
  3. agno/agent/remote.py +13 -0
  4. agno/db/base.py +339 -0
  5. agno/db/postgres/async_postgres.py +116 -12
  6. agno/db/postgres/postgres.py +1229 -25
  7. agno/db/postgres/schemas.py +48 -1
  8. agno/db/sqlite/async_sqlite.py +119 -4
  9. agno/db/sqlite/schemas.py +51 -0
  10. agno/db/sqlite/sqlite.py +1173 -13
  11. agno/db/utils.py +37 -1
  12. agno/knowledge/__init__.py +4 -0
  13. agno/knowledge/chunking/code.py +1 -1
  14. agno/knowledge/chunking/semantic.py +1 -1
  15. agno/knowledge/chunking/strategy.py +4 -0
  16. agno/knowledge/filesystem.py +412 -0
  17. agno/knowledge/knowledge.py +2767 -2254
  18. agno/knowledge/protocol.py +134 -0
  19. agno/knowledge/reader/arxiv_reader.py +2 -2
  20. agno/knowledge/reader/base.py +9 -7
  21. agno/knowledge/reader/csv_reader.py +5 -5
  22. agno/knowledge/reader/docx_reader.py +2 -2
  23. agno/knowledge/reader/field_labeled_csv_reader.py +2 -2
  24. agno/knowledge/reader/firecrawl_reader.py +2 -2
  25. agno/knowledge/reader/json_reader.py +2 -2
  26. agno/knowledge/reader/markdown_reader.py +2 -2
  27. agno/knowledge/reader/pdf_reader.py +5 -4
  28. agno/knowledge/reader/pptx_reader.py +2 -2
  29. agno/knowledge/reader/reader_factory.py +110 -0
  30. agno/knowledge/reader/s3_reader.py +2 -2
  31. agno/knowledge/reader/tavily_reader.py +2 -2
  32. agno/knowledge/reader/text_reader.py +2 -2
  33. agno/knowledge/reader/web_search_reader.py +2 -2
  34. agno/knowledge/reader/website_reader.py +5 -3
  35. agno/knowledge/reader/wikipedia_reader.py +2 -2
  36. agno/knowledge/reader/youtube_reader.py +2 -2
  37. agno/knowledge/utils.py +37 -29
  38. agno/learn/__init__.py +6 -0
  39. agno/learn/machine.py +35 -0
  40. agno/learn/schemas.py +82 -11
  41. agno/learn/stores/__init__.py +3 -0
  42. agno/learn/stores/decision_log.py +1156 -0
  43. agno/learn/stores/learned_knowledge.py +6 -6
  44. agno/models/anthropic/claude.py +24 -0
  45. agno/models/aws/bedrock.py +20 -0
  46. agno/models/base.py +48 -4
  47. agno/models/cohere/chat.py +25 -0
  48. agno/models/google/gemini.py +50 -5
  49. agno/models/litellm/chat.py +38 -0
  50. agno/models/openai/chat.py +7 -0
  51. agno/models/openrouter/openrouter.py +46 -0
  52. agno/models/response.py +16 -0
  53. agno/os/app.py +83 -44
  54. agno/os/middleware/__init__.py +2 -0
  55. agno/os/middleware/trailing_slash.py +27 -0
  56. agno/os/router.py +1 -0
  57. agno/os/routers/agents/router.py +29 -16
  58. agno/os/routers/agents/schema.py +6 -4
  59. agno/os/routers/components/__init__.py +3 -0
  60. agno/os/routers/components/components.py +466 -0
  61. agno/os/routers/evals/schemas.py +4 -3
  62. agno/os/routers/health.py +3 -3
  63. agno/os/routers/knowledge/knowledge.py +3 -3
  64. agno/os/routers/memory/schemas.py +4 -2
  65. agno/os/routers/metrics/metrics.py +9 -11
  66. agno/os/routers/metrics/schemas.py +10 -6
  67. agno/os/routers/registry/__init__.py +3 -0
  68. agno/os/routers/registry/registry.py +337 -0
  69. agno/os/routers/teams/router.py +20 -8
  70. agno/os/routers/teams/schema.py +6 -4
  71. agno/os/routers/traces/traces.py +5 -5
  72. agno/os/routers/workflows/router.py +38 -11
  73. agno/os/routers/workflows/schema.py +1 -1
  74. agno/os/schema.py +92 -26
  75. agno/os/utils.py +133 -16
  76. agno/reasoning/anthropic.py +2 -2
  77. agno/reasoning/azure_ai_foundry.py +2 -2
  78. agno/reasoning/deepseek.py +2 -2
  79. agno/reasoning/default.py +6 -7
  80. agno/reasoning/gemini.py +2 -2
  81. agno/reasoning/helpers.py +6 -7
  82. agno/reasoning/manager.py +4 -10
  83. agno/reasoning/ollama.py +2 -2
  84. agno/reasoning/openai.py +2 -2
  85. agno/reasoning/vertexai.py +2 -2
  86. agno/registry/__init__.py +3 -0
  87. agno/registry/registry.py +68 -0
  88. agno/run/agent.py +57 -0
  89. agno/run/base.py +7 -0
  90. agno/run/team.py +57 -0
  91. agno/skills/agent_skills.py +10 -3
  92. agno/team/__init__.py +3 -1
  93. agno/team/team.py +1276 -326
  94. agno/tools/duckduckgo.py +25 -71
  95. agno/tools/exa.py +0 -21
  96. agno/tools/function.py +35 -83
  97. agno/tools/knowledge.py +9 -4
  98. agno/tools/mem0.py +11 -10
  99. agno/tools/memory.py +47 -46
  100. agno/tools/parallel.py +0 -7
  101. agno/tools/reasoning.py +30 -23
  102. agno/tools/tavily.py +4 -1
  103. agno/tools/websearch.py +93 -0
  104. agno/tools/website.py +1 -1
  105. agno/tools/wikipedia.py +1 -1
  106. agno/tools/workflow.py +48 -47
  107. agno/utils/agent.py +42 -5
  108. agno/utils/events.py +160 -2
  109. agno/utils/print_response/agent.py +0 -31
  110. agno/utils/print_response/team.py +0 -2
  111. agno/utils/print_response/workflow.py +0 -2
  112. agno/utils/team.py +61 -11
  113. agno/vectordb/lancedb/lance_db.py +4 -1
  114. agno/vectordb/mongodb/mongodb.py +1 -1
  115. agno/vectordb/qdrant/qdrant.py +4 -4
  116. agno/workflow/__init__.py +3 -1
  117. agno/workflow/condition.py +0 -21
  118. agno/workflow/loop.py +0 -21
  119. agno/workflow/parallel.py +0 -21
  120. agno/workflow/router.py +0 -21
  121. agno/workflow/step.py +117 -24
  122. agno/workflow/steps.py +0 -21
  123. agno/workflow/workflow.py +625 -63
  124. {agno-2.3.25.dist-info → agno-2.4.0.dist-info}/METADATA +46 -76
  125. {agno-2.3.25.dist-info → agno-2.4.0.dist-info}/RECORD +128 -117
  126. {agno-2.3.25.dist-info → agno-2.4.0.dist-info}/WHEEL +0 -0
  127. {agno-2.3.25.dist-info → agno-2.4.0.dist-info}/licenses/LICENSE +0 -0
  128. {agno-2.3.25.dist-info → agno-2.4.0.dist-info}/top_level.txt +0 -0
agno/agent/agent.py CHANGED
@@ -9,7 +9,6 @@ from concurrent.futures import Future
9
9
  from dataclasses import dataclass
10
10
  from inspect import iscoroutinefunction
11
11
  from os import getenv
12
- from textwrap import dedent
13
12
  from typing import (
14
13
  Any,
15
14
  AsyncIterator,
@@ -34,8 +33,9 @@ from pydantic import BaseModel
34
33
 
35
34
  from agno.compression.manager import CompressionManager
36
35
  from agno.culture.manager import CultureManager
37
- from agno.db.base import AsyncBaseDb, BaseDb, SessionType, UserMemory
36
+ from agno.db.base import AsyncBaseDb, BaseDb, ComponentType, SessionType, UserMemory
38
37
  from agno.db.schemas.culture import CulturalKnowledge
38
+ from agno.db.utils import db_from_dict
39
39
  from agno.eval.base import BaseEval
40
40
  from agno.exceptions import (
41
41
  InputCheckError,
@@ -44,7 +44,7 @@ from agno.exceptions import (
44
44
  )
45
45
  from agno.filters import FilterExpr
46
46
  from agno.guardrails import BaseGuardrail
47
- from agno.knowledge.knowledge import Knowledge
47
+ from agno.knowledge.protocol import KnowledgeProtocol
48
48
  from agno.knowledge.types import KnowledgeFilter
49
49
  from agno.learn.machine import LearningMachine
50
50
  from agno.media import Audio, File, Image, Video
@@ -55,6 +55,7 @@ from agno.models.metrics import Metrics
55
55
  from agno.models.response import ModelResponse, ModelResponseEvent, ToolExecution
56
56
  from agno.models.utils import get_model
57
57
  from agno.reasoning.step import NextAction, ReasoningStep, ReasoningSteps
58
+ from agno.registry.registry import Registry
58
59
  from agno.run import RunContext, RunStatus
59
60
  from agno.run.agent import (
60
61
  RunEvent,
@@ -121,6 +122,10 @@ from agno.utils.agent import (
121
122
  from agno.utils.common import is_typed_dict
122
123
  from agno.utils.events import (
123
124
  add_error_event,
125
+ create_compression_completed_event,
126
+ create_compression_started_event,
127
+ create_model_request_completed_event,
128
+ create_model_request_started_event,
124
129
  create_parser_model_response_completed_event,
125
130
  create_parser_model_response_started_event,
126
131
  create_post_hook_completed_event,
@@ -235,7 +240,9 @@ class Agent:
235
240
  # Enable the agent to manage memories of the user
236
241
  enable_agentic_memory: bool = False
237
242
  # If True, the agent creates/updates user memories at the end of runs
238
- enable_user_memories: bool = False
243
+ update_memory_on_run: bool = False
244
+ # Soon to be deprecated. Use update_memory_on_run
245
+ enable_user_memories: Optional[bool] = None
239
246
  # If True, the agent adds a reference to the user memories in the response
240
247
  add_memories_to_context: Optional[bool] = None
241
248
 
@@ -254,7 +261,7 @@ class Agent:
254
261
  max_tool_calls_from_history: Optional[int] = None
255
262
 
256
263
  # --- Knowledge ---
257
- knowledge: Optional[Knowledge] = None
264
+ knowledge: Optional[KnowledgeProtocol] = None
258
265
  # Enable RAG by adding references from Knowledge to the user prompt.
259
266
  # Add knowledge_filters to the Agent class attributes
260
267
  knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
@@ -341,6 +348,8 @@ class Agent:
341
348
  description: Optional[str] = None
342
349
  # List of instructions for the agent.
343
350
  instructions: Optional[Union[str, List[str], Callable]] = None
351
+ # If True, wrap instructions in <instructions> tags. Default is False.
352
+ use_instruction_tags: bool = False
344
353
  # Provide the expected output from the Agent.
345
354
  expected_output: Optional[str] = None
346
355
  # Additional context added to the end of the system message.
@@ -459,9 +468,6 @@ class Agent:
459
468
  # This helps us improve the Agent and provide better support
460
469
  telemetry: bool = True
461
470
 
462
- # Deprecated. Use stream_events instead
463
- stream_intermediate_steps: Optional[bool] = None
464
-
465
471
  def __init__(
466
472
  self,
467
473
  *,
@@ -482,7 +488,8 @@ class Agent:
482
488
  db: Optional[Union[BaseDb, AsyncBaseDb]] = None,
483
489
  memory_manager: Optional[MemoryManager] = None,
484
490
  enable_agentic_memory: bool = False,
485
- enable_user_memories: bool = False,
491
+ update_memory_on_run: bool = False,
492
+ enable_user_memories: Optional[bool] = None, # Soon to be deprecated. Use update_memory_on_run
486
493
  add_memories_to_context: Optional[bool] = None,
487
494
  enable_session_summaries: bool = False,
488
495
  add_session_summary_to_context: Optional[bool] = None,
@@ -496,7 +503,7 @@ class Agent:
496
503
  store_media: bool = True,
497
504
  store_tool_messages: bool = True,
498
505
  store_history_messages: bool = True,
499
- knowledge: Optional[Knowledge] = None,
506
+ knowledge: Optional[KnowledgeProtocol] = None,
500
507
  knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
501
508
  enable_agentic_knowledge_filters: Optional[bool] = None,
502
509
  add_knowledge_to_context: bool = False,
@@ -526,6 +533,7 @@ class Agent:
526
533
  build_context: bool = True,
527
534
  description: Optional[str] = None,
528
535
  instructions: Optional[Union[str, List[str], Callable]] = None,
536
+ use_instruction_tags: bool = False,
529
537
  expected_output: Optional[str] = None,
530
538
  additional_context: Optional[str] = None,
531
539
  markdown: bool = False,
@@ -554,7 +562,6 @@ class Agent:
554
562
  save_response_to_file: Optional[str] = None,
555
563
  stream: Optional[bool] = None,
556
564
  stream_events: Optional[bool] = None,
557
- stream_intermediate_steps: Optional[bool] = None,
558
565
  store_events: bool = False,
559
566
  events_to_skip: Optional[List[RunEvent]] = None,
560
567
  role: Optional[str] = None,
@@ -589,7 +596,13 @@ class Agent:
589
596
 
590
597
  self.memory_manager = memory_manager
591
598
  self.enable_agentic_memory = enable_agentic_memory
592
- self.enable_user_memories = enable_user_memories
599
+
600
+ if enable_user_memories is not None:
601
+ self.update_memory_on_run = enable_user_memories
602
+ else:
603
+ self.update_memory_on_run = update_memory_on_run
604
+ self.enable_user_memories = self.update_memory_on_run # Soon to be deprecated. Use update_memory_on_run
605
+
593
606
  self.add_memories_to_context = add_memories_to_context
594
607
 
595
608
  self.enable_session_summaries = enable_session_summaries
@@ -654,9 +667,9 @@ class Agent:
654
667
  self.system_message = system_message
655
668
  self.system_message_role = system_message_role
656
669
  self.build_context = build_context
657
-
658
670
  self.description = description
659
671
  self.instructions = instructions
672
+ self.use_instruction_tags = use_instruction_tags
660
673
  self.expected_output = expected_output
661
674
  self.additional_context = additional_context
662
675
  self.markdown = markdown
@@ -688,14 +701,7 @@ class Agent:
688
701
  self.save_response_to_file = save_response_to_file
689
702
 
690
703
  self.stream = stream
691
-
692
- if stream_intermediate_steps is not None:
693
- warnings.warn(
694
- "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Use 'stream_events' instead.",
695
- DeprecationWarning,
696
- stacklevel=2,
697
- )
698
- self.stream_events = stream_events or stream_intermediate_steps
704
+ self.stream_events = stream_events
699
705
 
700
706
  self.store_events = store_events
701
707
  self.role = role
@@ -823,7 +829,7 @@ class Agent:
823
829
 
824
830
  if self.add_memories_to_context is None:
825
831
  self.add_memories_to_context = (
826
- self.enable_user_memories or self.enable_agentic_memory or self.memory_manager is not None
832
+ self.update_memory_on_run or self.enable_agentic_memory or self.memory_manager is not None
827
833
  )
828
834
 
829
835
  def _set_learning_machine(self) -> None:
@@ -916,7 +922,7 @@ class Agent:
916
922
  self._set_default_model()
917
923
  self._set_debug(debug_mode=debug_mode)
918
924
  self.set_id()
919
- if self.enable_user_memories or self.enable_agentic_memory or self.memory_manager is not None:
925
+ if self.update_memory_on_run or self.enable_agentic_memory or self.memory_manager is not None:
920
926
  self._set_memory_manager()
921
927
  if (
922
928
  self.add_culture_to_context
@@ -1020,22 +1026,6 @@ class Agent:
1020
1026
 
1021
1027
  return session_id, user_id
1022
1028
 
1023
- def _initialize_session_state(
1024
- self,
1025
- session_state: Dict[str, Any],
1026
- user_id: Optional[str] = None,
1027
- session_id: Optional[str] = None,
1028
- run_id: Optional[str] = None,
1029
- ) -> Dict[str, Any]:
1030
- """Initialize the session state for the agent."""
1031
- if user_id:
1032
- session_state["current_user_id"] = user_id
1033
- if session_id is not None:
1034
- session_state["current_session_id"] = session_id
1035
- if run_id is not None:
1036
- session_state["current_run_id"] = run_id
1037
- return session_state
1038
-
1039
1029
  def _run(
1040
1030
  self,
1041
1031
  run_response: RunOutput,
@@ -1161,7 +1151,9 @@ class Agent:
1161
1151
  raise_if_cancelled(run_response.run_id) # type: ignore
1162
1152
 
1163
1153
  # 5. Reason about the task
1164
- self._handle_reasoning(run_response=run_response, run_messages=run_messages)
1154
+ self._handle_reasoning(
1155
+ run_response=run_response, run_messages=run_messages, run_context=run_context
1156
+ )
1165
1157
 
1166
1158
  # Check for cancellation before model call
1167
1159
  raise_if_cancelled(run_response.run_id) # type: ignore
@@ -1467,6 +1459,7 @@ class Agent:
1467
1459
  yield from self._handle_reasoning_stream(
1468
1460
  run_response=run_response,
1469
1461
  run_messages=run_messages,
1462
+ run_context=run_context,
1470
1463
  stream_events=stream_events,
1471
1464
  )
1472
1465
 
@@ -1541,6 +1534,7 @@ class Agent:
1541
1534
  run_response=run_response,
1542
1535
  events_to_skip=self.events_to_skip,
1543
1536
  store_events=self.store_events,
1537
+ get_memories_callback=lambda: self.get_user_memories(user_id=user_id),
1544
1538
  )
1545
1539
 
1546
1540
  # Handle the paused run
@@ -1579,6 +1573,9 @@ class Agent:
1579
1573
  learning_future=learning_future, # type: ignore
1580
1574
  stream_events=stream_events,
1581
1575
  run_response=run_response,
1576
+ events_to_skip=self.events_to_skip,
1577
+ store_events=self.store_events,
1578
+ get_memories_callback=lambda: self.get_user_memories(user_id=user_id),
1582
1579
  )
1583
1580
 
1584
1581
  # 9. Create session summary
@@ -1742,7 +1739,6 @@ class Agent:
1742
1739
  *,
1743
1740
  stream: Literal[False] = False,
1744
1741
  stream_events: Optional[bool] = None,
1745
- stream_intermediate_steps: Optional[bool] = None,
1746
1742
  user_id: Optional[str] = None,
1747
1743
  session_id: Optional[str] = None,
1748
1744
  session_state: Optional[Dict[str, Any]] = None,
@@ -1770,7 +1766,6 @@ class Agent:
1770
1766
  *,
1771
1767
  stream: Literal[True] = True,
1772
1768
  stream_events: Optional[bool] = None,
1773
- stream_intermediate_steps: Optional[bool] = None,
1774
1769
  user_id: Optional[str] = None,
1775
1770
  session_id: Optional[str] = None,
1776
1771
  session_state: Optional[Dict[str, Any]] = None,
@@ -1787,7 +1782,6 @@ class Agent:
1787
1782
  dependencies: Optional[Dict[str, Any]] = None,
1788
1783
  metadata: Optional[Dict[str, Any]] = None,
1789
1784
  output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None,
1790
- yield_run_response: Optional[bool] = None, # To be deprecated: use yield_run_output instead
1791
1785
  yield_run_output: bool = False,
1792
1786
  debug_mode: Optional[bool] = None,
1793
1787
  **kwargs: Any,
@@ -1799,7 +1793,6 @@ class Agent:
1799
1793
  *,
1800
1794
  stream: Optional[bool] = None,
1801
1795
  stream_events: Optional[bool] = None,
1802
- stream_intermediate_steps: Optional[bool] = None,
1803
1796
  user_id: Optional[str] = None,
1804
1797
  session_id: Optional[str] = None,
1805
1798
  session_state: Optional[Dict[str, Any]] = None,
@@ -1816,7 +1809,6 @@ class Agent:
1816
1809
  dependencies: Optional[Dict[str, Any]] = None,
1817
1810
  metadata: Optional[Dict[str, Any]] = None,
1818
1811
  output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None,
1819
- yield_run_response: Optional[bool] = None, # To be deprecated: use yield_run_output instead
1820
1812
  yield_run_output: Optional[bool] = None,
1821
1813
  debug_mode: Optional[bool] = None,
1822
1814
  **kwargs: Any,
@@ -1838,13 +1830,6 @@ class Agent:
1838
1830
  "add_history_to_context is True, but no database has been assigned to the agent. History will not be added to the context."
1839
1831
  )
1840
1832
 
1841
- if yield_run_response is not None:
1842
- warnings.warn(
1843
- "The 'yield_run_response' parameter is deprecated and will be removed in future versions. Use 'yield_run_output' instead.",
1844
- DeprecationWarning,
1845
- stacklevel=2,
1846
- )
1847
-
1848
1833
  background_tasks = kwargs.pop("background_tasks", None)
1849
1834
  if background_tasks is not None:
1850
1835
  from fastapi import BackgroundTasks
@@ -1884,14 +1869,8 @@ class Agent:
1884
1869
  agent_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
1885
1870
  self._update_metadata(session=agent_session)
1886
1871
 
1887
- # Initialize session state
1888
- session_state = self._initialize_session_state(
1889
- session_state=session_state if session_state is not None else {},
1890
- user_id=user_id,
1891
- session_id=session_id,
1892
- run_id=run_id,
1893
- )
1894
- # Update session state from DB
1872
+ # Initialize session state. Get it from DB if relevant.
1873
+ session_state = session_state if session_state is not None else {}
1895
1874
  session_state = self._load_session_state(session=agent_session, session_state=session_state)
1896
1875
 
1897
1876
  # Determine runtime dependencies
@@ -1935,15 +1914,6 @@ class Agent:
1935
1914
  if stream is None:
1936
1915
  stream = False if self.stream is None else self.stream
1937
1916
 
1938
- # Considering both stream_events and stream_intermediate_steps (deprecated)
1939
- if stream_intermediate_steps is not None:
1940
- warnings.warn(
1941
- "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Use 'stream_events' instead.",
1942
- DeprecationWarning,
1943
- stacklevel=2,
1944
- )
1945
- stream_events = stream_events or stream_intermediate_steps
1946
-
1947
1917
  # Can't stream events if streaming is disabled
1948
1918
  if stream is False:
1949
1919
  stream_events = False
@@ -1978,8 +1948,6 @@ class Agent:
1978
1948
  run_response.metrics = Metrics()
1979
1949
  run_response.metrics.start_timer()
1980
1950
 
1981
- yield_run_output = yield_run_output or yield_run_response # For backwards compatibility
1982
-
1983
1951
  if stream:
1984
1952
  response_iterator = self._run_stream(
1985
1953
  run_response=run_response,
@@ -2068,18 +2036,12 @@ class Agent:
2068
2036
 
2069
2037
  # 2. Update metadata and session state
2070
2038
  self._update_metadata(session=agent_session)
2071
- # Initialize session state
2072
- run_context.session_state = self._initialize_session_state(
2039
+
2040
+ # Initialize session state. Get it from DB if relevant.
2041
+ run_context.session_state = self._load_session_state(
2042
+ session=agent_session,
2073
2043
  session_state=run_context.session_state if run_context.session_state is not None else {},
2074
- user_id=user_id,
2075
- session_id=session_id,
2076
- run_id=run_response.run_id,
2077
2044
  )
2078
- # Update session state from DB
2079
- if run_context.session_state is not None:
2080
- run_context.session_state = self._load_session_state(
2081
- session=agent_session, session_state=run_context.session_state
2082
- )
2083
2045
 
2084
2046
  # 3. Resolve dependencies
2085
2047
  if run_context.dependencies is not None:
@@ -2168,7 +2130,9 @@ class Agent:
2168
2130
  await araise_if_cancelled(run_response.run_id) # type: ignore
2169
2131
 
2170
2132
  # 8. Reason about the task if reasoning is enabled
2171
- await self._ahandle_reasoning(run_response=run_response, run_messages=run_messages)
2133
+ await self._ahandle_reasoning(
2134
+ run_response=run_response, run_messages=run_messages, run_context=run_context
2135
+ )
2172
2136
 
2173
2137
  # Check for cancellation before model call
2174
2138
  await araise_if_cancelled(run_response.run_id) # type: ignore
@@ -2435,18 +2399,12 @@ class Agent:
2435
2399
 
2436
2400
  # 2. Update metadata and session state
2437
2401
  self._update_metadata(session=agent_session)
2438
- # Initialize session state
2439
- run_context.session_state = self._initialize_session_state(
2402
+
2403
+ # Initialize session state. Get it from DB if relevant.
2404
+ run_context.session_state = self._load_session_state(
2405
+ session=agent_session,
2440
2406
  session_state=run_context.session_state if run_context.session_state is not None else {},
2441
- user_id=user_id,
2442
- session_id=session_id,
2443
- run_id=run_response.run_id,
2444
2407
  )
2445
- # Update session state from DB
2446
- if run_context.session_state is not None:
2447
- run_context.session_state = self._load_session_state(
2448
- session=agent_session, session_state=run_context.session_state
2449
- )
2450
2408
 
2451
2409
  # 3. Resolve dependencies
2452
2410
  if run_context.dependencies is not None:
@@ -2535,6 +2493,7 @@ class Agent:
2535
2493
  async for item in self._ahandle_reasoning_stream(
2536
2494
  run_response=run_response,
2537
2495
  run_messages=run_messages,
2496
+ run_context=run_context,
2538
2497
  stream_events=stream_events,
2539
2498
  ):
2540
2499
  await araise_if_cancelled(run_response.run_id) # type: ignore
@@ -2620,6 +2579,9 @@ class Agent:
2620
2579
  learning_task=learning_task,
2621
2580
  stream_events=stream_events,
2622
2581
  run_response=run_response,
2582
+ events_to_skip=self.events_to_skip,
2583
+ store_events=self.store_events,
2584
+ get_memories_callback=lambda: self.aget_user_memories(user_id=user_id),
2623
2585
  ):
2624
2586
  yield item
2625
2587
 
@@ -2653,6 +2615,7 @@ class Agent:
2653
2615
  run_response=run_response,
2654
2616
  events_to_skip=self.events_to_skip,
2655
2617
  store_events=self.store_events,
2618
+ get_memories_callback=lambda: self.aget_user_memories(user_id=user_id),
2656
2619
  ):
2657
2620
  yield item
2658
2621
 
@@ -2870,7 +2833,6 @@ class Agent:
2870
2833
  videos: Optional[Sequence[Video]] = None,
2871
2834
  files: Optional[Sequence[File]] = None,
2872
2835
  stream_events: Optional[bool] = None,
2873
- stream_intermediate_steps: Optional[bool] = None,
2874
2836
  knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
2875
2837
  add_history_to_context: Optional[bool] = None,
2876
2838
  add_dependencies_to_context: Optional[bool] = None,
@@ -2897,7 +2859,6 @@ class Agent:
2897
2859
  videos: Optional[Sequence[Video]] = None,
2898
2860
  files: Optional[Sequence[File]] = None,
2899
2861
  stream_events: Optional[bool] = None,
2900
- stream_intermediate_steps: Optional[bool] = None,
2901
2862
  knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
2902
2863
  add_history_to_context: Optional[bool] = None,
2903
2864
  add_dependencies_to_context: Optional[bool] = None,
@@ -2905,7 +2866,6 @@ class Agent:
2905
2866
  dependencies: Optional[Dict[str, Any]] = None,
2906
2867
  metadata: Optional[Dict[str, Any]] = None,
2907
2868
  output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None,
2908
- yield_run_response: Optional[bool] = None, # To be deprecated: use yield_run_output instead
2909
2869
  yield_run_output: Optional[bool] = None,
2910
2870
  debug_mode: Optional[bool] = None,
2911
2871
  **kwargs: Any,
@@ -2926,7 +2886,6 @@ class Agent:
2926
2886
  videos: Optional[Sequence[Video]] = None,
2927
2887
  files: Optional[Sequence[File]] = None,
2928
2888
  stream_events: Optional[bool] = None,
2929
- stream_intermediate_steps: Optional[bool] = None,
2930
2889
  knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
2931
2890
  add_history_to_context: Optional[bool] = None,
2932
2891
  add_dependencies_to_context: Optional[bool] = None,
@@ -2934,7 +2893,6 @@ class Agent:
2934
2893
  dependencies: Optional[Dict[str, Any]] = None,
2935
2894
  metadata: Optional[Dict[str, Any]] = None,
2936
2895
  output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None,
2937
- yield_run_response: Optional[bool] = None, # To be deprecated: use yield_run_output instead
2938
2896
  yield_run_output: Optional[bool] = None,
2939
2897
  debug_mode: Optional[bool] = None,
2940
2898
  **kwargs: Any,
@@ -2949,13 +2907,6 @@ class Agent:
2949
2907
  "add_history_to_context is True, but no database has been assigned to the agent. History will not be added to the context."
2950
2908
  )
2951
2909
 
2952
- if yield_run_response is not None:
2953
- warnings.warn(
2954
- "The 'yield_run_response' parameter is deprecated and will be removed in future versions. Use 'yield_run_output' instead.",
2955
- DeprecationWarning,
2956
- stacklevel=2,
2957
- )
2958
-
2959
2910
  background_tasks = kwargs.pop("background_tasks", None)
2960
2911
  if background_tasks is not None:
2961
2912
  from fastapi import BackgroundTasks
@@ -3008,15 +2959,6 @@ class Agent:
3008
2959
  if stream is None:
3009
2960
  stream = False if self.stream is None else self.stream
3010
2961
 
3011
- # Considering both stream_events and stream_intermediate_steps (deprecated)
3012
- if stream_intermediate_steps is not None:
3013
- warnings.warn(
3014
- "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Use 'stream_events' instead.",
3015
- DeprecationWarning,
3016
- stacklevel=2,
3017
- )
3018
- stream_events = stream_events or stream_intermediate_steps
3019
-
3020
2962
  # Can't stream events if streaming is disabled
3021
2963
  if stream is False:
3022
2964
  stream_events = False
@@ -3078,7 +3020,7 @@ class Agent:
3078
3020
  run_response.metrics = Metrics()
3079
3021
  run_response.metrics.start_timer()
3080
3022
 
3081
- yield_run_output = yield_run_output or yield_run_response # For backwards compatibility
3023
+ yield_run_output = yield_run_output
3082
3024
 
3083
3025
  # Pass the new run_response to _arun
3084
3026
  if stream:
@@ -3122,7 +3064,6 @@ class Agent:
3122
3064
  requirements: Optional[List[RunRequirement]] = None,
3123
3065
  stream: Literal[False] = False,
3124
3066
  stream_events: Optional[bool] = None,
3125
- stream_intermediate_steps: Optional[bool] = None,
3126
3067
  user_id: Optional[str] = None,
3127
3068
  session_id: Optional[str] = None,
3128
3069
  knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
@@ -3142,7 +3083,6 @@ class Agent:
3142
3083
  requirements: Optional[List[RunRequirement]] = None,
3143
3084
  stream: Literal[True] = True,
3144
3085
  stream_events: Optional[bool] = False,
3145
- stream_intermediate_steps: Optional[bool] = None,
3146
3086
  user_id: Optional[str] = None,
3147
3087
  session_id: Optional[str] = None,
3148
3088
  knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
@@ -3161,7 +3101,6 @@ class Agent:
3161
3101
  requirements: Optional[List[RunRequirement]] = None,
3162
3102
  stream: Optional[bool] = None,
3163
3103
  stream_events: Optional[bool] = False,
3164
- stream_intermediate_steps: Optional[bool] = None,
3165
3104
  user_id: Optional[str] = None,
3166
3105
  session_id: Optional[str] = None,
3167
3106
  run_context: Optional[RunContext] = None,
@@ -3187,8 +3126,6 @@ class Agent:
3187
3126
  dependencies: The dependencies to use for the run.
3188
3127
  metadata: The metadata to use for the run.
3189
3128
  debug_mode: Whether to enable debug mode.
3190
- (deprecated) stream_intermediate_steps: Whether to stream all steps.
3191
- (deprecated) updated_tools: Use 'requirements' instead.
3192
3129
  """
3193
3130
  if run_response is None and run_id is None:
3194
3131
  raise ValueError("Either run_response or run_id must be provided.")
@@ -3219,12 +3156,8 @@ class Agent:
3219
3156
  agent_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
3220
3157
  self._update_metadata(session=agent_session)
3221
3158
 
3222
- # Initialize session state
3223
- session_state = self._initialize_session_state(
3224
- session_state={}, user_id=user_id, session_id=session_id, run_id=run_id
3225
- )
3226
- # Update session state from DB
3227
- session_state = self._load_session_state(session=agent_session, session_state=session_state)
3159
+ # Initialize session state. Get it from DB if relevant.
3160
+ session_state = self._load_session_state(session=agent_session, session_state={})
3228
3161
 
3229
3162
  dependencies = dependencies if dependencies is not None else self.dependencies
3230
3163
 
@@ -3257,15 +3190,6 @@ class Agent:
3257
3190
  if stream is None:
3258
3191
  stream = False if self.stream is None else self.stream
3259
3192
 
3260
- # Considering both stream_events and stream_intermediate_steps (deprecated)
3261
- if stream_intermediate_steps is not None:
3262
- warnings.warn(
3263
- "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Use 'stream_events' instead.",
3264
- DeprecationWarning,
3265
- stacklevel=2,
3266
- )
3267
- stream_events = stream_events or stream_intermediate_steps
3268
-
3269
3193
  # Can't stream events if streaming is disabled
3270
3194
  if stream is False:
3271
3195
  stream_events = False
@@ -3273,10 +3197,6 @@ class Agent:
3273
3197
  if stream_events is None:
3274
3198
  stream_events = False if self.stream_events is None else self.stream_events
3275
3199
 
3276
- # Can't stream events if streaming is disabled
3277
- if stream is False:
3278
- stream_events = False
3279
-
3280
3200
  # Run can be continued from previous run response or from passed run_response context
3281
3201
  if run_response is not None:
3282
3202
  # The run is continued from a provided run_response. This contains the updated tools.
@@ -3817,7 +3737,6 @@ class Agent:
3817
3737
  *,
3818
3738
  stream: Literal[False] = False,
3819
3739
  stream_events: Optional[bool] = None,
3820
- stream_intermediate_steps: Optional[bool] = None,
3821
3740
  run_id: Optional[str] = None,
3822
3741
  updated_tools: Optional[List[ToolExecution]] = None,
3823
3742
  requirements: Optional[List[RunRequirement]] = None,
@@ -3837,7 +3756,6 @@ class Agent:
3837
3756
  *,
3838
3757
  stream: Literal[True] = True,
3839
3758
  stream_events: Optional[bool] = None,
3840
- stream_intermediate_steps: Optional[bool] = None,
3841
3759
  run_id: Optional[str] = None,
3842
3760
  updated_tools: Optional[List[ToolExecution]] = None,
3843
3761
  requirements: Optional[List[RunRequirement]] = None,
@@ -3859,7 +3777,6 @@ class Agent:
3859
3777
  requirements: Optional[List[RunRequirement]] = None,
3860
3778
  stream: Optional[bool] = None,
3861
3779
  stream_events: Optional[bool] = None,
3862
- stream_intermediate_steps: Optional[bool] = None,
3863
3780
  user_id: Optional[str] = None,
3864
3781
  session_id: Optional[str] = None,
3865
3782
  run_context: Optional[RunContext] = None,
@@ -3887,7 +3804,6 @@ class Agent:
3887
3804
  metadata: The metadata to use for continuing the run.
3888
3805
  debug_mode: Whether to enable debug mode.
3889
3806
  yield_run_output: Whether to yield the run response.
3890
- (deprecated) stream_intermediate_steps: Whether to stream all steps.
3891
3807
  (deprecated) updated_tools: Use 'requirements' instead.
3892
3808
  """
3893
3809
  if run_response is None and run_id is None:
@@ -3923,15 +3839,6 @@ class Agent:
3923
3839
  if stream is None:
3924
3840
  stream = False if self.stream is None else self.stream
3925
3841
 
3926
- # Considering both stream_events and stream_intermediate_steps (deprecated)
3927
- if stream_intermediate_steps is not None:
3928
- warnings.warn(
3929
- "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Use 'stream_events' instead.",
3930
- DeprecationWarning,
3931
- stacklevel=2,
3932
- )
3933
- stream_events = stream_events or stream_intermediate_steps
3934
-
3935
3842
  # Can't stream events if streaming is disabled
3936
3843
  if stream is False:
3937
3844
  stream_events = False
@@ -3939,7 +3846,7 @@ class Agent:
3939
3846
  if stream_events is None:
3940
3847
  stream_events = False if self.stream_events is None else self.stream_events
3941
3848
 
3942
- # Can't have stream_intermediate_steps if stream is False
3849
+ # Can't have stream_events if stream is False
3943
3850
  if stream is False:
3944
3851
  stream_events = False
3945
3852
 
@@ -4052,15 +3959,12 @@ class Agent:
4052
3959
 
4053
3960
  # 3. Update metadata and session state
4054
3961
  self._update_metadata(session=agent_session)
4055
- # Initialize session state
4056
- run_context.session_state = self._initialize_session_state(
4057
- session_state={}, user_id=user_id, session_id=session_id, run_id=run_id
4058
- )
4059
- # Update session state from DB
4060
- if run_context.session_state is not None:
4061
- run_context.session_state = self._load_session_state(
4062
- session=agent_session, session_state=run_context.session_state
4063
- )
3962
+
3963
+ # Initialize session state. Get it from DB if relevant.
3964
+ run_context.session_state = self._load_session_state(
3965
+ session=agent_session,
3966
+ session_state=run_context.session_state if run_context.session_state is not None else {},
3967
+ )
4064
3968
 
4065
3969
  # 4. Prepare run response
4066
3970
  if run_response is not None:
@@ -4357,15 +4261,12 @@ class Agent:
4357
4261
 
4358
4262
  # 2. Update session state and metadata
4359
4263
  self._update_metadata(session=agent_session)
4360
- # Initialize session state
4361
- run_context.session_state = self._initialize_session_state(
4362
- session_state={}, user_id=user_id, session_id=session_id, run_id=run_id
4363
- )
4364
- # Update session state from DB
4365
- if run_context.session_state is not None:
4366
- run_context.session_state = self._load_session_state(
4367
- session=agent_session, session_state=run_context.session_state
4368
- )
4264
+
4265
+ # Initialize session state. Get it from DB if relevant.
4266
+ run_context.session_state = self._load_session_state(
4267
+ session=agent_session,
4268
+ session_state=run_context.session_state if run_context.session_state is not None else {},
4269
+ )
4369
4270
 
4370
4271
  # 3. Resolve dependencies
4371
4272
  if run_context.dependencies is not None:
@@ -4746,9 +4647,6 @@ class Agent:
4746
4647
  "run_context": run_context,
4747
4648
  "agent": self,
4748
4649
  "session": session,
4749
- "session_state": run_context.session_state,
4750
- "dependencies": run_context.dependencies,
4751
- "metadata": run_context.metadata,
4752
4650
  "user_id": user_id,
4753
4651
  "debug_mode": debug_mode or self.debug_mode,
4754
4652
  }
@@ -4841,9 +4739,6 @@ class Agent:
4841
4739
  "agent": self,
4842
4740
  "session": session,
4843
4741
  "run_context": run_context,
4844
- "session_state": run_context.session_state,
4845
- "dependencies": run_context.dependencies,
4846
- "metadata": run_context.metadata,
4847
4742
  "user_id": user_id,
4848
4743
  "debug_mode": debug_mode or self.debug_mode,
4849
4744
  }
@@ -4939,9 +4834,6 @@ class Agent:
4939
4834
  "run_output": run_output,
4940
4835
  "agent": self,
4941
4836
  "session": session,
4942
- "session_state": run_context.session_state,
4943
- "dependencies": run_context.dependencies,
4944
- "metadata": run_context.metadata,
4945
4837
  "user_id": user_id,
4946
4838
  "run_context": run_context,
4947
4839
  "debug_mode": debug_mode or self.debug_mode,
@@ -5028,9 +4920,6 @@ class Agent:
5028
4920
  "agent": self,
5029
4921
  "session": session,
5030
4922
  "run_context": run_context,
5031
- "session_state": run_context.session_state,
5032
- "dependencies": run_context.dependencies,
5033
- "metadata": run_context.metadata,
5034
4923
  "user_id": user_id,
5035
4924
  "debug_mode": debug_mode or self.debug_mode,
5036
4925
  }
@@ -5701,6 +5590,70 @@ class Agent:
5701
5590
  send_media_to_model=self.send_media_to_model,
5702
5591
  compression_manager=self.compression_manager if self.compress_tool_results else None,
5703
5592
  ):
5593
+ # Handle LLM request events and compression events from ModelResponse
5594
+ if isinstance(model_response_event, ModelResponse):
5595
+ if model_response_event.event == ModelResponseEvent.model_request_started.value:
5596
+ if stream_events:
5597
+ yield handle_event( # type: ignore
5598
+ create_model_request_started_event(
5599
+ from_run_response=run_response,
5600
+ model=self.model.id,
5601
+ model_provider=self.model.provider,
5602
+ ),
5603
+ run_response,
5604
+ events_to_skip=self.events_to_skip, # type: ignore
5605
+ store_events=self.store_events,
5606
+ )
5607
+ continue
5608
+
5609
+ if model_response_event.event == ModelResponseEvent.model_request_completed.value:
5610
+ if stream_events:
5611
+ yield handle_event( # type: ignore
5612
+ create_model_request_completed_event(
5613
+ from_run_response=run_response,
5614
+ model=self.model.id,
5615
+ model_provider=self.model.provider,
5616
+ input_tokens=model_response_event.input_tokens,
5617
+ output_tokens=model_response_event.output_tokens,
5618
+ total_tokens=model_response_event.total_tokens,
5619
+ time_to_first_token=model_response_event.time_to_first_token,
5620
+ reasoning_tokens=model_response_event.reasoning_tokens,
5621
+ cache_read_tokens=model_response_event.cache_read_tokens,
5622
+ cache_write_tokens=model_response_event.cache_write_tokens,
5623
+ ),
5624
+ run_response,
5625
+ events_to_skip=self.events_to_skip, # type: ignore
5626
+ store_events=self.store_events,
5627
+ )
5628
+ continue
5629
+
5630
+ # Handle compression events
5631
+ if model_response_event.event == ModelResponseEvent.compression_started.value:
5632
+ if stream_events:
5633
+ yield handle_event( # type: ignore
5634
+ create_compression_started_event(from_run_response=run_response),
5635
+ run_response,
5636
+ events_to_skip=self.events_to_skip, # type: ignore
5637
+ store_events=self.store_events,
5638
+ )
5639
+ continue
5640
+
5641
+ if model_response_event.event == ModelResponseEvent.compression_completed.value:
5642
+ if stream_events:
5643
+ stats = model_response_event.compression_stats or {}
5644
+ yield handle_event( # type: ignore
5645
+ create_compression_completed_event(
5646
+ from_run_response=run_response,
5647
+ tool_results_compressed=stats.get("tool_results_compressed"),
5648
+ original_size=stats.get("original_size"),
5649
+ compressed_size=stats.get("compressed_size"),
5650
+ ),
5651
+ run_response,
5652
+ events_to_skip=self.events_to_skip, # type: ignore
5653
+ store_events=self.store_events,
5654
+ )
5655
+ continue
5656
+
5704
5657
  yield from self._handle_model_response_chunk(
5705
5658
  session=session,
5706
5659
  run_response=run_response,
@@ -5713,6 +5666,16 @@ class Agent:
5713
5666
  run_context=run_context,
5714
5667
  )
5715
5668
 
5669
+ # Update RunOutput
5670
+ # Build a list of messages that should be added to the RunOutput
5671
+ messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
5672
+ # Update the RunOutput messages
5673
+ run_response.messages = messages_for_run_response
5674
+ # Update the RunOutput metrics
5675
+ run_response.metrics = self._calculate_run_metrics(
5676
+ messages=messages_for_run_response, current_run_metrics=run_response.metrics
5677
+ )
5678
+
5716
5679
  # Determine reasoning completed
5717
5680
  if stream_events and reasoning_state["reasoning_started"]:
5718
5681
  all_reasoning_steps: List[ReasoningStep] = []
@@ -5735,16 +5698,6 @@ class Agent:
5735
5698
  store_events=self.store_events,
5736
5699
  )
5737
5700
 
5738
- # Update RunOutput
5739
- # Build a list of messages that should be added to the RunOutput
5740
- messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
5741
- # Update the RunOutput messages
5742
- run_response.messages = messages_for_run_response
5743
- # Update the RunOutput metrics
5744
- run_response.metrics = self._calculate_run_metrics(
5745
- messages=messages_for_run_response, current_run_metrics=run_response.metrics
5746
- )
5747
-
5748
5701
  # Update the run_response audio if streaming
5749
5702
  if model_response.audio is not None:
5750
5703
  run_response.response_audio = model_response.audio
@@ -5790,6 +5743,70 @@ class Agent:
5790
5743
  ) # type: ignore
5791
5744
 
5792
5745
  async for model_response_event in model_response_stream: # type: ignore
5746
+ # Handle LLM request events and compression events from ModelResponse
5747
+ if isinstance(model_response_event, ModelResponse):
5748
+ if model_response_event.event == ModelResponseEvent.model_request_started.value:
5749
+ if stream_events:
5750
+ yield handle_event( # type: ignore
5751
+ create_model_request_started_event(
5752
+ from_run_response=run_response,
5753
+ model=self.model.id,
5754
+ model_provider=self.model.provider,
5755
+ ),
5756
+ run_response,
5757
+ events_to_skip=self.events_to_skip, # type: ignore
5758
+ store_events=self.store_events,
5759
+ )
5760
+ continue
5761
+
5762
+ if model_response_event.event == ModelResponseEvent.model_request_completed.value:
5763
+ if stream_events:
5764
+ yield handle_event( # type: ignore
5765
+ create_model_request_completed_event(
5766
+ from_run_response=run_response,
5767
+ model=self.model.id,
5768
+ model_provider=self.model.provider,
5769
+ input_tokens=model_response_event.input_tokens,
5770
+ output_tokens=model_response_event.output_tokens,
5771
+ total_tokens=model_response_event.total_tokens,
5772
+ time_to_first_token=model_response_event.time_to_first_token,
5773
+ reasoning_tokens=model_response_event.reasoning_tokens,
5774
+ cache_read_tokens=model_response_event.cache_read_tokens,
5775
+ cache_write_tokens=model_response_event.cache_write_tokens,
5776
+ ),
5777
+ run_response,
5778
+ events_to_skip=self.events_to_skip, # type: ignore
5779
+ store_events=self.store_events,
5780
+ )
5781
+ continue
5782
+
5783
+ # Handle compression events
5784
+ if model_response_event.event == ModelResponseEvent.compression_started.value:
5785
+ if stream_events:
5786
+ yield handle_event( # type: ignore
5787
+ create_compression_started_event(from_run_response=run_response),
5788
+ run_response,
5789
+ events_to_skip=self.events_to_skip, # type: ignore
5790
+ store_events=self.store_events,
5791
+ )
5792
+ continue
5793
+
5794
+ if model_response_event.event == ModelResponseEvent.compression_completed.value:
5795
+ if stream_events:
5796
+ stats = model_response_event.compression_stats or {}
5797
+ yield handle_event( # type: ignore
5798
+ create_compression_completed_event(
5799
+ from_run_response=run_response,
5800
+ tool_results_compressed=stats.get("tool_results_compressed"),
5801
+ original_size=stats.get("original_size"),
5802
+ compressed_size=stats.get("compressed_size"),
5803
+ ),
5804
+ run_response,
5805
+ events_to_skip=self.events_to_skip, # type: ignore
5806
+ store_events=self.store_events,
5807
+ )
5808
+ continue
5809
+
5793
5810
  for event in self._handle_model_response_chunk(
5794
5811
  session=session,
5795
5812
  run_response=run_response,
@@ -5803,6 +5820,16 @@ class Agent:
5803
5820
  ):
5804
5821
  yield event
5805
5822
 
5823
+ # Update RunOutput
5824
+ # Build a list of messages that should be added to the RunOutput
5825
+ messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
5826
+ # Update the RunOutput messages
5827
+ run_response.messages = messages_for_run_response
5828
+ # Update the RunOutput metrics
5829
+ run_response.metrics = self._calculate_run_metrics(
5830
+ messages=messages_for_run_response, current_run_metrics=run_response.metrics
5831
+ )
5832
+
5806
5833
  if stream_events and reasoning_state["reasoning_started"]:
5807
5834
  all_reasoning_steps: List[ReasoningStep] = []
5808
5835
  if run_response and run_response.reasoning_steps:
@@ -5824,16 +5851,6 @@ class Agent:
5824
5851
  store_events=self.store_events,
5825
5852
  )
5826
5853
 
5827
- # Update RunOutput
5828
- # Build a list of messages that should be added to the RunOutput
5829
- messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
5830
- # Update the RunOutput messages
5831
- run_response.messages = messages_for_run_response
5832
- # Update the RunOutput metrics
5833
- run_response.metrics = self._calculate_run_metrics(
5834
- messages=messages_for_run_response, current_run_metrics=run_response.metrics
5835
- )
5836
-
5837
5854
  # Update the run_response audio if streaming
5838
5855
  if model_response.audio is not None:
5839
5856
  run_response.response_audio = model_response.audio
@@ -6224,7 +6241,7 @@ class Agent:
6224
6241
  user_message_str is not None
6225
6242
  and user_message_str.strip() != ""
6226
6243
  and self.memory_manager is not None
6227
- and self.enable_user_memories
6244
+ and self.update_memory_on_run
6228
6245
  ):
6229
6246
  log_debug("Managing user memories")
6230
6247
  self.memory_manager.create_user_memories( # type: ignore
@@ -6253,7 +6270,7 @@ class Agent:
6253
6270
  for msg in parsed_messages
6254
6271
  if msg.content and (not isinstance(msg.content, str) or msg.content.strip() != "")
6255
6272
  ]
6256
- if len(non_empty_messages) > 0 and self.memory_manager is not None and self.enable_user_memories:
6273
+ if len(non_empty_messages) > 0 and self.memory_manager is not None and self.update_memory_on_run:
6257
6274
  self.memory_manager.create_user_memories(messages=non_empty_messages, user_id=user_id, agent_id=self.id) # type: ignore
6258
6275
  else:
6259
6276
  log_warning("Unable to add messages to memory")
@@ -6270,7 +6287,7 @@ class Agent:
6270
6287
  user_message_str is not None
6271
6288
  and user_message_str.strip() != ""
6272
6289
  and self.memory_manager is not None
6273
- and self.enable_user_memories
6290
+ and self.update_memory_on_run
6274
6291
  ):
6275
6292
  log_debug("Managing user memories")
6276
6293
  await self.memory_manager.acreate_user_memories( # type: ignore
@@ -6299,7 +6316,7 @@ class Agent:
6299
6316
  for msg in parsed_messages
6300
6317
  if msg.content and (not isinstance(msg.content, str) or msg.content.strip() != "")
6301
6318
  ]
6302
- if len(non_empty_messages) > 0 and self.memory_manager is not None and self.enable_user_memories:
6319
+ if len(non_empty_messages) > 0 and self.memory_manager is not None and self.update_memory_on_run:
6303
6320
  await self.memory_manager.acreate_user_memories( # type: ignore
6304
6321
  messages=non_empty_messages, user_id=user_id, agent_id=self.id
6305
6322
  )
@@ -6334,7 +6351,7 @@ class Agent:
6334
6351
  if (
6335
6352
  run_messages.user_message is not None
6336
6353
  and self.memory_manager is not None
6337
- and self.enable_user_memories
6354
+ and self.update_memory_on_run
6338
6355
  and not self.enable_agentic_memory
6339
6356
  ):
6340
6357
  log_debug("Starting memory creation in background task.")
@@ -6487,7 +6504,7 @@ class Agent:
6487
6504
  if (
6488
6505
  run_messages.user_message is not None
6489
6506
  and self.memory_manager is not None
6490
- and self.enable_user_memories
6507
+ and self.update_memory_on_run
6491
6508
  and not self.enable_agentic_memory
6492
6509
  ):
6493
6510
  log_debug("Starting memory creation in background thread.")
@@ -6636,39 +6653,31 @@ class Agent:
6636
6653
  agent_tools.append(Function(name="update_session_state", entrypoint=self._update_session_state_tool))
6637
6654
 
6638
6655
  # Add tools for accessing knowledge
6639
- if self.knowledge is not None or self.knowledge_retriever is not None:
6640
- # Check if knowledge retriever is an async function but used in sync mode
6641
- from inspect import iscoroutinefunction
6642
-
6643
- if self.knowledge_retriever and iscoroutinefunction(self.knowledge_retriever):
6644
- log_warning(
6645
- "Async knowledge retriever function is being used with synchronous agent.run() or agent.print_response(). "
6646
- "It is recommended to use agent.arun() or agent.aprint_response() instead."
6656
+ if self.knowledge is not None and self.search_knowledge:
6657
+ # Use knowledge protocol's get_tools method
6658
+ get_tools_fn = getattr(self.knowledge, "get_tools", None)
6659
+ if callable(get_tools_fn):
6660
+ knowledge_tools = get_tools_fn(
6661
+ run_response=run_response,
6662
+ run_context=run_context,
6663
+ knowledge_filters=run_context.knowledge_filters,
6664
+ async_mode=False,
6665
+ enable_agentic_filters=self.enable_agentic_knowledge_filters,
6666
+ agent=self,
6647
6667
  )
6668
+ agent_tools.extend(knowledge_tools)
6669
+ elif self.knowledge_retriever is not None and self.search_knowledge:
6670
+ # Create search tool using custom knowledge_retriever
6671
+ agent_tools.append(
6672
+ self._create_knowledge_retriever_search_tool(
6673
+ run_response=run_response,
6674
+ run_context=run_context,
6675
+ async_mode=False,
6676
+ )
6677
+ )
6648
6678
 
6649
- if self.search_knowledge:
6650
- # Use async or sync search based on async_mode
6651
- if self.enable_agentic_knowledge_filters:
6652
- agent_tools.append(
6653
- self._search_knowledge_base_with_agentic_filters_function(
6654
- run_response=run_response,
6655
- async_mode=False,
6656
- knowledge_filters=run_context.knowledge_filters,
6657
- run_context=run_context,
6658
- )
6659
- )
6660
- else:
6661
- agent_tools.append(
6662
- self._get_search_knowledge_base_function(
6663
- run_response=run_response,
6664
- async_mode=False,
6665
- knowledge_filters=run_context.knowledge_filters,
6666
- run_context=run_context,
6667
- )
6668
- )
6669
-
6670
- if self.update_knowledge:
6671
- agent_tools.append(self.add_to_knowledge)
6679
+ if self.knowledge is not None and self.update_knowledge:
6680
+ agent_tools.append(self.add_to_knowledge)
6672
6681
 
6673
6682
  # Add tools for accessing skills
6674
6683
  if self.skills is not None:
@@ -6754,30 +6763,43 @@ class Agent:
6754
6763
  agent_tools.append(Function(name="update_session_state", entrypoint=self._update_session_state_tool))
6755
6764
 
6756
6765
  # Add tools for accessing knowledge
6757
- if self.knowledge is not None or self.knowledge_retriever is not None:
6758
- if self.search_knowledge:
6759
- # Use async or sync search based on async_mode
6760
- if self.enable_agentic_knowledge_filters:
6761
- agent_tools.append(
6762
- self._search_knowledge_base_with_agentic_filters_function(
6763
- run_response=run_response,
6764
- async_mode=True,
6765
- knowledge_filters=run_context.knowledge_filters,
6766
- run_context=run_context,
6767
- )
6768
- )
6769
- else:
6770
- agent_tools.append(
6771
- self._get_search_knowledge_base_function(
6772
- run_response=run_response,
6773
- async_mode=True,
6774
- knowledge_filters=run_context.knowledge_filters,
6775
- run_context=run_context,
6776
- )
6777
- )
6766
+ if self.knowledge is not None and self.search_knowledge:
6767
+ # Use knowledge protocol's get_tools method
6768
+ aget_tools_fn = getattr(self.knowledge, "aget_tools", None)
6769
+ get_tools_fn = getattr(self.knowledge, "get_tools", None)
6770
+
6771
+ if callable(aget_tools_fn):
6772
+ knowledge_tools = await aget_tools_fn(
6773
+ run_response=run_response,
6774
+ run_context=run_context,
6775
+ knowledge_filters=run_context.knowledge_filters,
6776
+ async_mode=True,
6777
+ enable_agentic_filters=self.enable_agentic_knowledge_filters,
6778
+ agent=self,
6779
+ )
6780
+ agent_tools.extend(knowledge_tools)
6781
+ elif callable(get_tools_fn):
6782
+ knowledge_tools = get_tools_fn(
6783
+ run_response=run_response,
6784
+ run_context=run_context,
6785
+ knowledge_filters=run_context.knowledge_filters,
6786
+ async_mode=True,
6787
+ enable_agentic_filters=self.enable_agentic_knowledge_filters,
6788
+ agent=self,
6789
+ )
6790
+ agent_tools.extend(knowledge_tools)
6791
+ elif self.knowledge_retriever is not None and self.search_knowledge:
6792
+ # Create search tool using custom knowledge_retriever
6793
+ agent_tools.append(
6794
+ self._create_knowledge_retriever_search_tool(
6795
+ run_response=run_response,
6796
+ run_context=run_context,
6797
+ async_mode=True,
6798
+ )
6799
+ )
6778
6800
 
6779
- if self.update_knowledge:
6780
- agent_tools.append(self.add_to_knowledge)
6801
+ if self.knowledge is not None and self.update_knowledge:
6802
+ agent_tools.append(self.add_to_knowledge)
6781
6803
 
6782
6804
  # Add tools for accessing skills
6783
6805
  if self.skills is not None:
@@ -6785,102 +6807,120 @@ class Agent:
6785
6807
 
6786
6808
  return agent_tools
6787
6809
 
6788
- def _determine_tools_for_model(
6810
+ def _parse_tools(
6789
6811
  self,
6812
+ tools: List[Union[Toolkit, Callable, Function, Dict]],
6790
6813
  model: Model,
6791
- processed_tools: List[Union[Toolkit, Callable, Function, Dict]],
6792
- run_response: RunOutput,
6793
- run_context: RunContext,
6794
- session: AgentSession,
6814
+ run_context: Optional[RunContext] = None,
6795
6815
  async_mode: bool = False,
6796
6816
  ) -> List[Union[Function, dict]]:
6797
6817
  _function_names = []
6798
6818
  _functions: List[Union[Function, dict]] = []
6799
6819
  self._tool_instructions = []
6800
6820
 
6801
- # Get Agent tools
6802
- if processed_tools is not None and len(processed_tools) > 0:
6803
- log_debug("Processing tools for model")
6804
-
6805
- # Get output_schema from run_context
6806
- output_schema = run_context.output_schema if run_context else None
6821
+ # Get output_schema from run_context
6822
+ output_schema = run_context.output_schema if run_context else None
6807
6823
 
6808
- # Check if we need strict mode for the functions for the model
6809
- strict = False
6810
- if (
6811
- output_schema is not None
6812
- and (self.structured_outputs or (not self.use_json_mode))
6813
- and model.supports_native_structured_outputs
6814
- ):
6815
- strict = True
6816
-
6817
- for tool in processed_tools:
6818
- if isinstance(tool, Dict):
6819
- # If a dict is passed, it is a builtin tool
6820
- # that is run by the model provider and not the Agent
6821
- _functions.append(tool)
6822
- log_debug(f"Included builtin tool {tool}")
6823
-
6824
- elif isinstance(tool, Toolkit):
6825
- # For each function in the toolkit and process entrypoint
6826
- toolkit_functions = tool.get_async_functions() if async_mode else tool.get_functions()
6827
- for name, _func in toolkit_functions.items():
6828
- if name in _function_names:
6829
- continue
6830
- _function_names.append(name)
6831
- _func = _func.model_copy(deep=True)
6832
- _func._agent = self
6833
- _func.process_entrypoint(strict=strict)
6834
- if strict and _func.strict is None:
6835
- _func.strict = True
6836
- if self.tool_hooks is not None:
6837
- _func.tool_hooks = self.tool_hooks
6838
- _functions.append(_func)
6839
- log_debug(f"Added tool {name} from {tool.name}")
6840
-
6841
- # Add instructions from the toolkit
6842
- if tool.add_instructions and tool.instructions is not None:
6843
- self._tool_instructions.append(tool.instructions)
6844
-
6845
- elif isinstance(tool, Function):
6846
- if tool.name in _function_names:
6824
+ # Check if we need strict mode for the functions for the model
6825
+ strict = False
6826
+ if (
6827
+ output_schema is not None
6828
+ and (self.structured_outputs or (not self.use_json_mode))
6829
+ and model.supports_native_structured_outputs
6830
+ ):
6831
+ strict = True
6832
+
6833
+ for tool in tools:
6834
+ if isinstance(tool, Dict):
6835
+ # If a dict is passed, it is a builtin tool
6836
+ # that is run by the model provider and not the Agent
6837
+ _functions.append(tool)
6838
+ log_debug(f"Included builtin tool {tool}")
6839
+
6840
+ elif isinstance(tool, Toolkit):
6841
+ # For each function in the toolkit and process entrypoint
6842
+ toolkit_functions = tool.get_async_functions() if async_mode else tool.get_functions()
6843
+ for name, _func in toolkit_functions.items():
6844
+ if name in _function_names:
6847
6845
  continue
6848
- _function_names.append(tool.name)
6846
+ _function_names.append(name)
6847
+ _func = _func.model_copy(deep=True)
6848
+ _func._agent = self
6849
+ # Respect the function's explicit strict setting if set
6850
+ effective_strict = strict if _func.strict is None else _func.strict
6851
+ _func.process_entrypoint(strict=effective_strict)
6852
+ if strict and _func.strict is None:
6853
+ _func.strict = True
6854
+ if self.tool_hooks is not None:
6855
+ _func.tool_hooks = self.tool_hooks
6856
+ _functions.append(_func)
6857
+ log_debug(f"Added tool {name} from {tool.name}")
6849
6858
 
6850
- tool.process_entrypoint(strict=strict)
6851
- tool = tool.model_copy(deep=True)
6859
+ # Add instructions from the toolkit
6860
+ if tool.add_instructions and tool.instructions is not None:
6861
+ self._tool_instructions.append(tool.instructions)
6852
6862
 
6853
- tool._agent = self
6854
- if strict and tool.strict is None:
6855
- tool.strict = True
6863
+ elif isinstance(tool, Function):
6864
+ if tool.name in _function_names:
6865
+ continue
6866
+ _function_names.append(tool.name)
6867
+
6868
+ # Respect the function's explicit strict setting if set
6869
+ effective_strict = strict if tool.strict is None else tool.strict
6870
+ tool.process_entrypoint(strict=effective_strict)
6871
+ tool = tool.model_copy(deep=True)
6872
+
6873
+ tool._agent = self
6874
+ if strict and tool.strict is None:
6875
+ tool.strict = True
6876
+ if self.tool_hooks is not None:
6877
+ tool.tool_hooks = self.tool_hooks
6878
+ _functions.append(tool)
6879
+ log_debug(f"Added tool {tool.name}")
6880
+
6881
+ # Add instructions from the Function
6882
+ if tool.add_instructions and tool.instructions is not None:
6883
+ self._tool_instructions.append(tool.instructions)
6884
+
6885
+ elif callable(tool):
6886
+ try:
6887
+ function_name = tool.__name__
6888
+
6889
+ if function_name in _function_names:
6890
+ continue
6891
+ _function_names.append(function_name)
6892
+
6893
+ _func = Function.from_callable(tool, strict=strict)
6894
+ _func = _func.model_copy(deep=True)
6895
+ _func._agent = self
6896
+ if strict:
6897
+ _func.strict = True
6856
6898
  if self.tool_hooks is not None:
6857
- tool.tool_hooks = self.tool_hooks
6858
- _functions.append(tool)
6859
- log_debug(f"Added tool {tool.name}")
6899
+ _func.tool_hooks = self.tool_hooks
6900
+ _functions.append(_func)
6901
+ log_debug(f"Added tool {_func.name}")
6902
+ except Exception as e:
6903
+ log_warning(f"Could not add tool {tool}: {e}")
6860
6904
 
6861
- # Add instructions from the Function
6862
- if tool.add_instructions and tool.instructions is not None:
6863
- self._tool_instructions.append(tool.instructions)
6905
+ return _functions
6864
6906
 
6865
- elif callable(tool):
6866
- try:
6867
- function_name = tool.__name__
6907
+ def _determine_tools_for_model(
6908
+ self,
6909
+ model: Model,
6910
+ processed_tools: List[Union[Toolkit, Callable, Function, Dict]],
6911
+ run_response: RunOutput,
6912
+ run_context: RunContext,
6913
+ session: AgentSession,
6914
+ async_mode: bool = False,
6915
+ ) -> List[Union[Function, dict]]:
6916
+ _functions: List[Union[Function, dict]] = []
6868
6917
 
6869
- if function_name in _function_names:
6870
- continue
6871
- _function_names.append(function_name)
6872
-
6873
- _func = Function.from_callable(tool, strict=strict)
6874
- _func = _func.model_copy(deep=True)
6875
- _func._agent = self
6876
- if strict:
6877
- _func.strict = True
6878
- if self.tool_hooks is not None:
6879
- _func.tool_hooks = self.tool_hooks
6880
- _functions.append(_func)
6881
- log_debug(f"Added tool {_func.name}")
6882
- except Exception as e:
6883
- log_warning(f"Could not add tool {tool}: {e}")
6918
+ # Get Agent tools
6919
+ if processed_tools is not None and len(processed_tools) > 0:
6920
+ log_debug("Processing tools for model")
6921
+ _functions = self._parse_tools(
6922
+ tools=processed_tools, model=model, run_context=run_context, async_mode=async_mode
6923
+ )
6884
6924
 
6885
6925
  # Update the session state for the functions
6886
6926
  if _functions:
@@ -6902,8 +6942,6 @@ class Agent:
6902
6942
  for func in _functions: # type: ignore
6903
6943
  if isinstance(func, Function):
6904
6944
  func._run_context = run_context
6905
- func._session_state = run_context.session_state
6906
- func._dependencies = run_context.dependencies
6907
6945
  func._images = joint_images
6908
6946
  func._files = joint_files
6909
6947
  func._audios = joint_audios
@@ -7289,6 +7327,647 @@ class Agent:
7289
7327
 
7290
7328
  return agent_session
7291
7329
 
7330
+ # -*- Serialization Functions
7331
+ def to_dict(self) -> Dict[str, Any]:
7332
+ """
7333
+ Convert the Agent to a dictionary.
7334
+
7335
+ Returns:
7336
+ Dict[str, Any]: Dictionary representation of the agent configuration
7337
+ """
7338
+ config: Dict[str, Any] = {}
7339
+
7340
+ # --- Agent Settings ---
7341
+ if self.model is not None:
7342
+ if isinstance(self.model, Model):
7343
+ config["model"] = self.model.to_dict()
7344
+ else:
7345
+ config["model"] = str(self.model)
7346
+ if self.name is not None:
7347
+ config["name"] = self.name
7348
+ if self.id is not None:
7349
+ config["id"] = self.id
7350
+
7351
+ # --- User settings ---
7352
+ if self.user_id is not None:
7353
+ config["user_id"] = self.user_id
7354
+
7355
+ # --- Session settings ---
7356
+ if self.session_id is not None:
7357
+ config["session_id"] = self.session_id
7358
+ if self.session_state is not None:
7359
+ config["session_state"] = self.session_state
7360
+ if self.add_session_state_to_context:
7361
+ config["add_session_state_to_context"] = self.add_session_state_to_context
7362
+ if self.enable_agentic_state:
7363
+ config["enable_agentic_state"] = self.enable_agentic_state
7364
+ if self.overwrite_db_session_state:
7365
+ config["overwrite_db_session_state"] = self.overwrite_db_session_state
7366
+ if self.cache_session:
7367
+ config["cache_session"] = self.cache_session
7368
+ if self.search_session_history:
7369
+ config["search_session_history"] = self.search_session_history
7370
+ if self.num_history_sessions is not None:
7371
+ config["num_history_sessions"] = self.num_history_sessions
7372
+ if self.enable_session_summaries:
7373
+ config["enable_session_summaries"] = self.enable_session_summaries
7374
+ if self.add_session_summary_to_context is not None:
7375
+ config["add_session_summary_to_context"] = self.add_session_summary_to_context
7376
+ # TODO: implement session summary manager serialization
7377
+ # if self.session_summary_manager is not None:
7378
+ # config["session_summary_manager"] = self.session_summary_manager.to_dict()
7379
+
7380
+ # --- Dependencies ---
7381
+ if self.dependencies is not None:
7382
+ config["dependencies"] = self.dependencies
7383
+ if self.add_dependencies_to_context:
7384
+ config["add_dependencies_to_context"] = self.add_dependencies_to_context
7385
+
7386
+ # --- Agentic Memory settings ---
7387
+ # TODO: implement agentic memory serialization
7388
+ # if self.memory_manager is not None:
7389
+ # config["memory_manager"] = self.memory_manager.to_dict()
7390
+ if self.enable_agentic_memory:
7391
+ config["enable_agentic_memory"] = self.enable_agentic_memory
7392
+ if self.enable_user_memories:
7393
+ config["enable_user_memories"] = self.enable_user_memories
7394
+ if self.add_memories_to_context is not None:
7395
+ config["add_memories_to_context"] = self.add_memories_to_context
7396
+
7397
+ # --- Database settings ---
7398
+ if self.db is not None and hasattr(self.db, "to_dict"):
7399
+ config["db"] = self.db.to_dict()
7400
+
7401
+ # --- History settings ---
7402
+ if self.add_history_to_context:
7403
+ config["add_history_to_context"] = self.add_history_to_context
7404
+ if self.num_history_runs is not None:
7405
+ config["num_history_runs"] = self.num_history_runs
7406
+ if self.num_history_messages is not None:
7407
+ config["num_history_messages"] = self.num_history_messages
7408
+ if self.max_tool_calls_from_history is not None:
7409
+ config["max_tool_calls_from_history"] = self.max_tool_calls_from_history
7410
+
7411
+ # --- Knowledge settings ---
7412
+ # TODO: implement knowledge serialization
7413
+ # if self.knowledge is not None:
7414
+ # config["knowledge"] = self.knowledge.to_dict()
7415
+ if self.knowledge_filters is not None:
7416
+ config["knowledge_filters"] = self.knowledge_filters
7417
+ if self.enable_agentic_knowledge_filters:
7418
+ config["enable_agentic_knowledge_filters"] = self.enable_agentic_knowledge_filters
7419
+ if self.add_knowledge_to_context:
7420
+ config["add_knowledge_to_context"] = self.add_knowledge_to_context
7421
+ # Skip knowledge_retriever as it's a callable
7422
+ if self.references_format != "json":
7423
+ config["references_format"] = self.references_format
7424
+
7425
+ # --- Tools ---
7426
+ # Serialize tools to their dictionary representations
7427
+ _tools: List[Union[Function, dict]] = []
7428
+ if self.model is not None:
7429
+ _tools = self._parse_tools(
7430
+ model=self.model,
7431
+ tools=self.tools or [],
7432
+ )
7433
+ if _tools:
7434
+ serialized_tools = []
7435
+ for tool in _tools:
7436
+ try:
7437
+ if isinstance(tool, Function):
7438
+ serialized_tools.append(tool.to_dict())
7439
+ else:
7440
+ serialized_tools.append(tool)
7441
+ except Exception as e:
7442
+ # Skip tools that can't be serialized
7443
+ from agno.utils.log import log_warning
7444
+
7445
+ log_warning(f"Could not serialize tool {tool}: {e}")
7446
+ if serialized_tools:
7447
+ config["tools"] = serialized_tools
7448
+
7449
+ if self.tool_call_limit is not None:
7450
+ config["tool_call_limit"] = self.tool_call_limit
7451
+ if self.tool_choice is not None:
7452
+ config["tool_choice"] = self.tool_choice
7453
+
7454
+ # --- Reasoning settings ---
7455
+ if self.reasoning:
7456
+ config["reasoning"] = self.reasoning
7457
+ if self.reasoning_model is not None:
7458
+ if isinstance(self.reasoning_model, Model):
7459
+ config["reasoning_model"] = self.reasoning_model.to_dict()
7460
+ else:
7461
+ config["reasoning_model"] = str(self.reasoning_model)
7462
+ # Skip reasoning_agent to avoid circular serialization
7463
+ if self.reasoning_min_steps != 1:
7464
+ config["reasoning_min_steps"] = self.reasoning_min_steps
7465
+ if self.reasoning_max_steps != 10:
7466
+ config["reasoning_max_steps"] = self.reasoning_max_steps
7467
+
7468
+ # --- Default tools settings ---
7469
+ if self.read_chat_history:
7470
+ config["read_chat_history"] = self.read_chat_history
7471
+ if not self.search_knowledge:
7472
+ config["search_knowledge"] = self.search_knowledge
7473
+ if self.update_knowledge:
7474
+ config["update_knowledge"] = self.update_knowledge
7475
+ if self.read_tool_call_history:
7476
+ config["read_tool_call_history"] = self.read_tool_call_history
7477
+ if not self.send_media_to_model:
7478
+ config["send_media_to_model"] = self.send_media_to_model
7479
+ if not self.store_media:
7480
+ config["store_media"] = self.store_media
7481
+ if not self.store_tool_messages:
7482
+ config["store_tool_messages"] = self.store_tool_messages
7483
+ if not self.store_history_messages:
7484
+ config["store_history_messages"] = self.store_history_messages
7485
+
7486
+ # --- System message settings ---
7487
+ # Skip system_message if it's a callable or Message object
7488
+ # TODO: Support Message objects
7489
+ if self.system_message is not None and isinstance(self.system_message, str):
7490
+ config["system_message"] = self.system_message
7491
+ if self.system_message_role != "system":
7492
+ config["system_message_role"] = self.system_message_role
7493
+ if not self.build_context:
7494
+ config["build_context"] = self.build_context
7495
+
7496
+ # --- Context building settings ---
7497
+ if self.description is not None:
7498
+ config["description"] = self.description
7499
+ # Handle instructions (can be str, list, or callable)
7500
+ if self.instructions is not None:
7501
+ if isinstance(self.instructions, str):
7502
+ config["instructions"] = self.instructions
7503
+ elif isinstance(self.instructions, list):
7504
+ config["instructions"] = self.instructions
7505
+ # Skip if callable
7506
+ if self.expected_output is not None:
7507
+ config["expected_output"] = self.expected_output
7508
+ if self.additional_context is not None:
7509
+ config["additional_context"] = self.additional_context
7510
+ if self.markdown:
7511
+ config["markdown"] = self.markdown
7512
+ if self.add_name_to_context:
7513
+ config["add_name_to_context"] = self.add_name_to_context
7514
+ if self.add_datetime_to_context:
7515
+ config["add_datetime_to_context"] = self.add_datetime_to_context
7516
+ if self.add_location_to_context:
7517
+ config["add_location_to_context"] = self.add_location_to_context
7518
+ if self.timezone_identifier is not None:
7519
+ config["timezone_identifier"] = self.timezone_identifier
7520
+ if not self.resolve_in_context:
7521
+ config["resolve_in_context"] = self.resolve_in_context
7522
+
7523
+ # --- Additional input ---
7524
+ # Skip additional_input as it may contain complex Message objects
7525
+ # TODO: Support Message objects
7526
+
7527
+ # --- User message settings ---
7528
+ if self.user_message_role != "user":
7529
+ config["user_message_role"] = self.user_message_role
7530
+ if not self.build_user_context:
7531
+ config["build_user_context"] = self.build_user_context
7532
+
7533
+ # --- Response settings ---
7534
+ if self.retries > 0:
7535
+ config["retries"] = self.retries
7536
+ if self.delay_between_retries != 1:
7537
+ config["delay_between_retries"] = self.delay_between_retries
7538
+ if self.exponential_backoff:
7539
+ config["exponential_backoff"] = self.exponential_backoff
7540
+
7541
+ # --- Schema settings ---
7542
+ if self.input_schema is not None:
7543
+ if isinstance(self.input_schema, type) and issubclass(self.input_schema, BaseModel):
7544
+ config["input_schema"] = self.input_schema.__name__
7545
+ elif isinstance(self.input_schema, dict):
7546
+ config["input_schema"] = self.input_schema
7547
+ if self.output_schema is not None:
7548
+ if isinstance(self.output_schema, type) and issubclass(self.output_schema, BaseModel):
7549
+ config["output_schema"] = self.output_schema.__name__
7550
+ elif isinstance(self.output_schema, dict):
7551
+ config["output_schema"] = self.output_schema
7552
+
7553
+ # --- Parser and output settings ---
7554
+ if self.parser_model is not None:
7555
+ if isinstance(self.parser_model, Model):
7556
+ config["parser_model"] = self.parser_model.to_dict()
7557
+ else:
7558
+ config["parser_model"] = str(self.parser_model)
7559
+ if self.parser_model_prompt is not None:
7560
+ config["parser_model_prompt"] = self.parser_model_prompt
7561
+ if self.output_model is not None:
7562
+ if isinstance(self.output_model, Model):
7563
+ config["output_model"] = self.output_model.to_dict()
7564
+ else:
7565
+ config["output_model"] = str(self.output_model)
7566
+ if self.output_model_prompt is not None:
7567
+ config["output_model_prompt"] = self.output_model_prompt
7568
+ if not self.parse_response:
7569
+ config["parse_response"] = self.parse_response
7570
+ if self.structured_outputs is not None:
7571
+ config["structured_outputs"] = self.structured_outputs
7572
+ if self.use_json_mode:
7573
+ config["use_json_mode"] = self.use_json_mode
7574
+ if self.save_response_to_file is not None:
7575
+ config["save_response_to_file"] = self.save_response_to_file
7576
+
7577
+ # --- Streaming settings ---
7578
+ if self.stream is not None:
7579
+ config["stream"] = self.stream
7580
+ if self.stream_events is not None:
7581
+ config["stream_events"] = self.stream_events
7582
+ if self.store_events:
7583
+ config["store_events"] = self.store_events
7584
+ # Skip events_to_skip as it contains RunEvent enums
7585
+
7586
+ # --- Role and culture settings ---
7587
+ if self.role is not None:
7588
+ config["role"] = self.role
7589
+ # --- Team and workflow settings ---
7590
+ if self.team_id is not None:
7591
+ config["team_id"] = self.team_id
7592
+ if self.workflow_id is not None:
7593
+ config["workflow_id"] = self.workflow_id
7594
+
7595
+ # --- Metadata ---
7596
+ if self.metadata is not None:
7597
+ config["metadata"] = self.metadata
7598
+
7599
+ # --- Context compression settings ---
7600
+ if self.compress_tool_results:
7601
+ config["compress_tool_results"] = self.compress_tool_results
7602
+ # TODO: implement compression manager serialization
7603
+ # if self.compression_manager is not None:
7604
+ # config["compression_manager"] = self.compression_manager.to_dict()
7605
+
7606
+ # --- Debug and telemetry settings ---
7607
+ if self.debug_mode:
7608
+ config["debug_mode"] = self.debug_mode
7609
+ if self.debug_level != 1:
7610
+ config["debug_level"] = self.debug_level
7611
+ if not self.telemetry:
7612
+ config["telemetry"] = self.telemetry
7613
+
7614
+ return config
7615
+
7616
+ @classmethod
7617
+ def from_dict(cls, data: Dict[str, Any], registry: Optional[Registry] = None) -> "Agent":
7618
+ """
7619
+ Create an agent from a dictionary.
7620
+
7621
+ Args:
7622
+ data: Dictionary containing agent configuration
7623
+ registry: Optional registry for rehydrating tools and schemas
7624
+
7625
+ Returns:
7626
+ Agent: Reconstructed agent instance
7627
+ """
7628
+ from agno.models.utils import get_model
7629
+
7630
+ config = data.copy()
7631
+
7632
+ # --- Handle Model reconstruction ---
7633
+ if "model" in config:
7634
+ model_data = config["model"]
7635
+ if isinstance(model_data, dict) and "id" in model_data:
7636
+ config["model"] = get_model(f"{model_data['provider']}:{model_data['id']}")
7637
+ elif isinstance(model_data, str):
7638
+ config["model"] = get_model(model_data)
7639
+
7640
+ # --- Handle reasoning_model reconstruction ---
7641
+ # TODO: implement reasoning model deserialization
7642
+ # if "reasoning_model" in config:
7643
+ # model_data = config["reasoning_model"]
7644
+ # if isinstance(model_data, dict) and "id" in model_data:
7645
+ # config["reasoning_model"] = get_model(f"{model_data['provider']}:{model_data['id']}")
7646
+ # elif isinstance(model_data, str):
7647
+ # config["reasoning_model"] = get_model(model_data)
7648
+
7649
+ # --- Handle parser_model reconstruction ---
7650
+ # TODO: implement parser model deserialization
7651
+ # if "parser_model" in config:
7652
+ # model_data = config["parser_model"]
7653
+ # if isinstance(model_data, dict) and "id" in model_data:
7654
+ # config["parser_model"] = get_model(f"{model_data['provider']}:{model_data['id']}")
7655
+ # elif isinstance(model_data, str):
7656
+ # config["parser_model"] = get_model(model_data)
7657
+
7658
+ # --- Handle output_model reconstruction ---
7659
+ # TODO: implement output model deserialization
7660
+ # if "output_model" in config:
7661
+ # model_data = config["output_model"]
7662
+ # if isinstance(model_data, dict) and "id" in model_data:
7663
+ # config["output_model"] = get_model(f"{model_data['provider']}:{model_data['id']}")
7664
+ # elif isinstance(model_data, str):
7665
+ # config["output_model"] = get_model(model_data)
7666
+
7667
+ # --- Handle tools reconstruction ---
7668
+ if "tools" in config and config["tools"]:
7669
+ if registry:
7670
+ config["tools"] = [registry.rehydrate_function(t) for t in config["tools"]]
7671
+ else:
7672
+ log_warning("No registry provided, tools will not be rehydrated.")
7673
+ del config["tools"]
7674
+
7675
+ # --- Handle DB reconstruction ---
7676
+ if "db" in config and isinstance(config["db"], dict):
7677
+ db_data = config["db"]
7678
+ db_id = db_data.get("id")
7679
+
7680
+ # First try to get the db from the registry (preferred - reuses existing connection)
7681
+ if registry and db_id:
7682
+ registry_db = registry.get_db(db_id)
7683
+ if registry_db is not None:
7684
+ config["db"] = registry_db
7685
+ else:
7686
+ del config["db"]
7687
+ else:
7688
+ # No registry or no db_id, fall back to creating from dict
7689
+ config["db"] = db_from_dict(db_data)
7690
+ if config["db"] is None:
7691
+ del config["db"]
7692
+
7693
+ # --- Handle Schema reconstruction ---
7694
+ if "input_schema" in config and isinstance(config["input_schema"], str):
7695
+ schema_cls = registry.get_schema(config["input_schema"]) if registry else None
7696
+ if schema_cls:
7697
+ config["input_schema"] = schema_cls
7698
+ else:
7699
+ log_warning(f"Input schema {config['input_schema']} not found in registry, skipping.")
7700
+ del config["input_schema"]
7701
+
7702
+ if "output_schema" in config and isinstance(config["output_schema"], str):
7703
+ schema_cls = registry.get_schema(config["output_schema"]) if registry else None
7704
+ if schema_cls:
7705
+ config["output_schema"] = schema_cls
7706
+ else:
7707
+ log_warning(f"Output schema {config['output_schema']} not found in registry, skipping.")
7708
+ del config["output_schema"]
7709
+
7710
+ # --- Handle MemoryManager reconstruction ---
7711
+ # TODO: implement memory manager deserialization
7712
+ # if "memory_manager" in config and isinstance(config["memory_manager"], dict):
7713
+ # from agno.memory import MemoryManager
7714
+ # config["memory_manager"] = MemoryManager.from_dict(config["memory_manager"])
7715
+
7716
+ # --- Handle SessionSummaryManager reconstruction ---
7717
+ # TODO: implement session summary manager deserialization
7718
+ # if "session_summary_manager" in config and isinstance(config["session_summary_manager"], dict):
7719
+ # from agno.session import SessionSummaryManager
7720
+ # config["session_summary_manager"] = SessionSummaryManager.from_dict(config["session_summary_manager"])
7721
+
7722
+ # --- Handle CultureManager reconstruction ---
7723
+ # TODO: implement culture manager deserialization
7724
+ # if "culture_manager" in config and isinstance(config["culture_manager"], dict):
7725
+ # from agno.culture import CultureManager
7726
+ # config["culture_manager"] = CultureManager.from_dict(config["culture_manager"])
7727
+
7728
+ # --- Handle Knowledge reconstruction ---
7729
+ # TODO: implement knowledge deserialization
7730
+ # if "knowledge" in config and isinstance(config["knowledge"], dict):
7731
+ # from agno.knowledge import Knowledge
7732
+ # config["knowledge"] = Knowledge.from_dict(config["knowledge"])
7733
+
7734
+ # --- Handle CompressionManager reconstruction ---
7735
+ # TODO: implement compression manager deserialization
7736
+ # if "compression_manager" in config and isinstance(config["compression_manager"], dict):
7737
+ # from agno.compression.manager import CompressionManager
7738
+ # config["compression_manager"] = CompressionManager.from_dict(config["compression_manager"])
7739
+
7740
+ # Remove keys that aren't constructor parameters
7741
+ config.pop("team_id", None)
7742
+ config.pop("workflow_id", None)
7743
+
7744
+ return cls(
7745
+ # --- Agent settings ---
7746
+ model=config.get("model"),
7747
+ name=config.get("name"),
7748
+ id=config.get("id"),
7749
+ # --- User settings ---
7750
+ user_id=config.get("user_id"),
7751
+ # --- Session settings ---
7752
+ session_id=config.get("session_id"),
7753
+ session_state=config.get("session_state"),
7754
+ add_session_state_to_context=config.get("add_session_state_to_context", False),
7755
+ enable_agentic_state=config.get("enable_agentic_state", False),
7756
+ overwrite_db_session_state=config.get("overwrite_db_session_state", False),
7757
+ cache_session=config.get("cache_session", False),
7758
+ search_session_history=config.get("search_session_history", False),
7759
+ num_history_sessions=config.get("num_history_sessions"),
7760
+ enable_session_summaries=config.get("enable_session_summaries", False),
7761
+ add_session_summary_to_context=config.get("add_session_summary_to_context"),
7762
+ # session_summary_manager=config.get("session_summary_manager"), # TODO
7763
+ # --- Dependencies ---
7764
+ dependencies=config.get("dependencies"),
7765
+ add_dependencies_to_context=config.get("add_dependencies_to_context", False),
7766
+ # --- Agentic Memory settings ---
7767
+ # memory_manager=config.get("memory_manager"), # TODO
7768
+ enable_agentic_memory=config.get("enable_agentic_memory", False),
7769
+ enable_user_memories=config.get("enable_user_memories", False),
7770
+ add_memories_to_context=config.get("add_memories_to_context"),
7771
+ # --- Database settings ---
7772
+ db=config.get("db"),
7773
+ # --- History settings ---
7774
+ add_history_to_context=config.get("add_history_to_context", False),
7775
+ num_history_runs=config.get("num_history_runs"),
7776
+ num_history_messages=config.get("num_history_messages"),
7777
+ max_tool_calls_from_history=config.get("max_tool_calls_from_history"),
7778
+ # --- Knowledge settings ---
7779
+ # knowledge=config.get("knowledge"), # TODO
7780
+ knowledge_filters=config.get("knowledge_filters"),
7781
+ enable_agentic_knowledge_filters=config.get("enable_agentic_knowledge_filters", False),
7782
+ add_knowledge_to_context=config.get("add_knowledge_to_context", False),
7783
+ references_format=config.get("references_format", "json"),
7784
+ # --- Tools ---
7785
+ tools=config.get("tools"),
7786
+ tool_call_limit=config.get("tool_call_limit"),
7787
+ tool_choice=config.get("tool_choice"),
7788
+ # --- Reasoning settings ---
7789
+ reasoning=config.get("reasoning", False),
7790
+ # reasoning_model=config.get("reasoning_model"), # TODO
7791
+ reasoning_min_steps=config.get("reasoning_min_steps", 1),
7792
+ reasoning_max_steps=config.get("reasoning_max_steps", 10),
7793
+ # --- Default tools settings ---
7794
+ read_chat_history=config.get("read_chat_history", False),
7795
+ search_knowledge=config.get("search_knowledge", True),
7796
+ update_knowledge=config.get("update_knowledge", False),
7797
+ read_tool_call_history=config.get("read_tool_call_history", False),
7798
+ send_media_to_model=config.get("send_media_to_model", True),
7799
+ store_media=config.get("store_media", True),
7800
+ store_tool_messages=config.get("store_tool_messages", True),
7801
+ store_history_messages=config.get("store_history_messages", True),
7802
+ # --- System message settings ---
7803
+ system_message=config.get("system_message"),
7804
+ system_message_role=config.get("system_message_role", "system"),
7805
+ build_context=config.get("build_context", True),
7806
+ # --- Context building settings ---
7807
+ description=config.get("description"),
7808
+ instructions=config.get("instructions"),
7809
+ expected_output=config.get("expected_output"),
7810
+ additional_context=config.get("additional_context"),
7811
+ markdown=config.get("markdown", False),
7812
+ add_name_to_context=config.get("add_name_to_context", False),
7813
+ add_datetime_to_context=config.get("add_datetime_to_context", False),
7814
+ add_location_to_context=config.get("add_location_to_context", False),
7815
+ timezone_identifier=config.get("timezone_identifier"),
7816
+ resolve_in_context=config.get("resolve_in_context", True),
7817
+ # --- User message settings ---
7818
+ user_message_role=config.get("user_message_role", "user"),
7819
+ build_user_context=config.get("build_user_context", True),
7820
+ # --- Response settings ---
7821
+ retries=config.get("retries", 0),
7822
+ delay_between_retries=config.get("delay_between_retries", 1),
7823
+ exponential_backoff=config.get("exponential_backoff", False),
7824
+ # --- Schema settings ---
7825
+ input_schema=config.get("input_schema"),
7826
+ output_schema=config.get("output_schema"),
7827
+ # --- Parser and output settings ---
7828
+ # parser_model=config.get("parser_model"), # TODO
7829
+ parser_model_prompt=config.get("parser_model_prompt"),
7830
+ # output_model=config.get("output_model"), # TODO
7831
+ output_model_prompt=config.get("output_model_prompt"),
7832
+ parse_response=config.get("parse_response", True),
7833
+ structured_outputs=config.get("structured_outputs"),
7834
+ use_json_mode=config.get("use_json_mode", False),
7835
+ save_response_to_file=config.get("save_response_to_file"),
7836
+ # --- Streaming settings ---
7837
+ stream=config.get("stream"),
7838
+ stream_events=config.get("stream_events"),
7839
+ store_events=config.get("store_events", False),
7840
+ role=config.get("role"),
7841
+ # --- Culture settings ---
7842
+ # culture_manager=config.get("culture_manager"), # TODO
7843
+ # --- Metadata ---
7844
+ metadata=config.get("metadata"),
7845
+ # --- Compression settings ---
7846
+ compress_tool_results=config.get("compress_tool_results", False),
7847
+ # compression_manager=config.get("compression_manager"), # TODO
7848
+ # --- Debug and telemetry settings ---
7849
+ debug_mode=config.get("debug_mode", False),
7850
+ debug_level=config.get("debug_level", 1),
7851
+ telemetry=config.get("telemetry", True),
7852
+ )
7853
+
7854
+ # -*- Component and Config Functions
7855
+ def save(
7856
+ self,
7857
+ *,
7858
+ db: Optional["BaseDb"] = None,
7859
+ stage: str = "published",
7860
+ label: Optional[str] = None,
7861
+ notes: Optional[str] = None,
7862
+ ) -> Optional[int]:
7863
+ """
7864
+ Save the agent component and config.
7865
+
7866
+ Args:
7867
+ db: The database to save the component and config to.
7868
+ stage: The stage of the component. Defaults to "published".
7869
+ label: The label of the component.
7870
+ notes: The notes of the component.
7871
+
7872
+ Returns:
7873
+ Optional[int]: The version number of the saved config.
7874
+ """
7875
+ db_ = db or self.db
7876
+ if not db_:
7877
+ raise ValueError("Db not initialized or provided")
7878
+ if not isinstance(db_, BaseDb):
7879
+ raise ValueError("Async databases not yet supported for save(). Use a sync database.")
7880
+
7881
+ if self.id is None:
7882
+ self.id = generate_id_from_name(self.name)
7883
+
7884
+ try:
7885
+ # Create or update component
7886
+ db_.upsert_component(
7887
+ component_id=self.id,
7888
+ component_type=ComponentType.AGENT,
7889
+ name=getattr(self, "name", self.id),
7890
+ description=getattr(self, "description", None),
7891
+ metadata=getattr(self, "metadata", None),
7892
+ )
7893
+
7894
+ # Create or update config
7895
+ config = db_.upsert_config(
7896
+ component_id=self.id,
7897
+ config=self.to_dict(),
7898
+ label=label,
7899
+ stage=stage,
7900
+ notes=notes,
7901
+ )
7902
+
7903
+ return config.get("version")
7904
+
7905
+ except Exception as e:
7906
+ log_error(f"Error saving Agent to database: {e}")
7907
+ raise
7908
+
7909
+ @classmethod
7910
+ def load(
7911
+ cls,
7912
+ id: str,
7913
+ *,
7914
+ db: "BaseDb",
7915
+ registry: Optional["Registry"] = None,
7916
+ label: Optional[str] = None,
7917
+ version: Optional[int] = None,
7918
+ ) -> Optional["Agent"]:
7919
+ """
7920
+ Load an agent by id.
7921
+
7922
+ Args:
7923
+ id: The id of the agent to load.
7924
+ db: The database to load the agent from.
7925
+ label: The label of the agent to load.
7926
+
7927
+ Returns:
7928
+ The agent loaded from the database or None if not found.
7929
+ """
7930
+
7931
+ data = db.get_config(component_id=id, label=label, version=version)
7932
+ if data is None:
7933
+ return None
7934
+
7935
+ config = data.get("config")
7936
+ if config is None:
7937
+ return None
7938
+
7939
+ agent = cls.from_dict(config, registry=registry)
7940
+ agent.id = id
7941
+ agent.db = db
7942
+
7943
+ return agent
7944
+
7945
+ def delete(
7946
+ self,
7947
+ *,
7948
+ db: Optional["BaseDb"] = None,
7949
+ hard_delete: bool = False,
7950
+ ) -> bool:
7951
+ """
7952
+ Delete the agent component.
7953
+
7954
+ Args:
7955
+ db: The database to delete the component from.
7956
+ hard_delete: Whether to hard delete the component.
7957
+
7958
+ Returns:
7959
+ True if the component was deleted, False otherwise.
7960
+ """
7961
+ db_ = db or self.db
7962
+ if not db_:
7963
+ raise ValueError("Db not initialized or provided")
7964
+ if not isinstance(db_, BaseDb):
7965
+ raise ValueError("Async databases not yet supported for delete(). Use a sync database.")
7966
+ if self.id is None:
7967
+ raise ValueError("Cannot delete agent without an id")
7968
+
7969
+ return db_.delete_component(component_id=self.id, hard_delete=hard_delete)
7970
+
7292
7971
  # -*- Public Convenience Functions
7293
7972
  def get_run_output(self, run_id: str, session_id: Optional[str] = None) -> Optional[RunOutput]:
7294
7973
  """
@@ -8009,12 +8688,9 @@ class Agent:
8009
8688
  def _format_message_with_state_variables(
8010
8689
  self,
8011
8690
  message: Any,
8012
- session_state: Optional[Dict[str, Any]] = None,
8013
- dependencies: Optional[Dict[str, Any]] = None,
8014
- metadata: Optional[Dict[str, Any]] = None,
8015
- user_id: Optional[str] = None,
8691
+ run_context: Optional[RunContext] = None,
8016
8692
  ) -> Any:
8017
- """Format a message with the session state variables."""
8693
+ """Format a message with the session state variables from run_context."""
8018
8694
  import re
8019
8695
  import string
8020
8696
  from copy import deepcopy
@@ -8022,6 +8698,12 @@ class Agent:
8022
8698
  if not isinstance(message, str):
8023
8699
  return message
8024
8700
 
8701
+ # Extract values from run_context
8702
+ session_state = run_context.session_state if run_context else None
8703
+ dependencies = run_context.dependencies if run_context else None
8704
+ metadata = run_context.metadata if run_context else None
8705
+ user_id = run_context.user_id if run_context else None
8706
+
8025
8707
  # Should already be resolved and passed from run() method
8026
8708
  format_variables = ChainMap(
8027
8709
  session_state if session_state is not None else {},
@@ -8050,12 +8732,8 @@ class Agent:
8050
8732
  self,
8051
8733
  session: AgentSession,
8052
8734
  run_context: Optional[RunContext] = None,
8053
- user_id: Optional[str] = None,
8054
8735
  tools: Optional[List[Union[Function, dict]]] = None,
8055
8736
  add_session_state_to_context: Optional[bool] = None,
8056
- session_state: Optional[Dict[str, Any]] = None, # Deprecated
8057
- dependencies: Optional[Dict[str, Any]] = None, # Deprecated
8058
- metadata: Optional[Dict[str, Any]] = None, # Deprecated
8059
8737
  ) -> Optional[Message]:
8060
8738
  """Return the system message for the Agent.
8061
8739
 
@@ -8064,11 +8742,9 @@ class Agent:
8064
8742
  3. Build and return the default system message for the Agent.
8065
8743
  """
8066
8744
 
8067
- # Consider both run_context and session_state, dependencies, metadata (deprecated fields)
8068
- if run_context is not None:
8069
- session_state = run_context.session_state or session_state
8070
- dependencies = run_context.dependencies or dependencies
8071
- metadata = run_context.metadata or metadata
8745
+ # Extract values from run_context
8746
+ session_state = run_context.session_state if run_context else None
8747
+ user_id = run_context.user_id if run_context else None
8072
8748
 
8073
8749
  # Get output_schema from run_context
8074
8750
  output_schema = run_context.output_schema if run_context else None
@@ -8091,10 +8767,7 @@ class Agent:
8091
8767
  if self.resolve_in_context:
8092
8768
  sys_message_content = self._format_message_with_state_variables(
8093
8769
  sys_message_content,
8094
- user_id=user_id,
8095
- session_state=session_state,
8096
- dependencies=dependencies,
8097
- metadata=metadata,
8770
+ run_context=run_context,
8098
8771
  )
8099
8772
 
8100
8773
  # type: ignore
@@ -8173,32 +8846,15 @@ class Agent:
8173
8846
  if self.name is not None and self.add_name_to_context:
8174
8847
  additional_information.append(f"Your name is: {self.name}.")
8175
8848
 
8176
- # 3.2.5 Add information about agentic filters if enabled
8177
- if self.knowledge is not None and self.enable_agentic_knowledge_filters:
8178
- valid_filters = self.knowledge.get_valid_filters()
8179
- if valid_filters:
8180
- valid_filters_str = ", ".join(valid_filters)
8181
- additional_information.append(
8182
- dedent(
8183
- f"""
8184
- The knowledge base contains documents with these metadata filters: {valid_filters_str}.
8185
- Always use filters when the user query indicates specific metadata.
8186
-
8187
- Examples:
8188
- 1. If the user asks about a specific person like "Jordan Mitchell", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like user_id>': '<valid value based on the user query>'}}.
8189
- 2. If the user asks about a specific document type like "contracts", you MUST use the search_knowledge_base tool with the filters parameter set to {{'document_type': 'contract'}}.
8190
- 4. If the user asks about a specific location like "documents from New York", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like location>': 'New York'}}.
8191
-
8192
- General Guidelines:
8193
- - Always analyze the user query to identify relevant metadata.
8194
- - Use the most specific filter(s) possible to narrow down results.
8195
- - If multiple filters are relevant, combine them in the filters parameter (e.g., {{'name': 'Jordan Mitchell', 'document_type': 'contract'}}).
8196
- - Ensure the filter keys match the valid metadata filters: {valid_filters_str}.
8197
-
8198
- You can use the search_knowledge_base tool to search the knowledge base and get the most relevant documents. Make sure to pass the filters as [Dict[str: Any]] to the tool. FOLLOW THIS STRUCTURE STRICTLY.
8199
- """
8200
- )
8849
+ # 3.2.5 Add knowledge context using protocol's build_context
8850
+ if self.knowledge is not None:
8851
+ build_context_fn = getattr(self.knowledge, "build_context", None)
8852
+ if callable(build_context_fn):
8853
+ knowledge_context = build_context_fn(
8854
+ enable_agentic_filters=self.enable_agentic_knowledge_filters,
8201
8855
  )
8856
+ if knowledge_context:
8857
+ additional_information.append(knowledge_context)
8202
8858
 
8203
8859
  # 3.3 Build the default system message for the Agent.
8204
8860
  system_message_content: str = ""
@@ -8208,22 +8864,29 @@ class Agent:
8208
8864
  # 3.3.2 Then add the Agent role if provided
8209
8865
  if self.role is not None:
8210
8866
  system_message_content += f"\n<your_role>\n{self.role}\n</your_role>\n\n"
8211
- # 3.3.4 Then add instructions for the Agent
8867
+ # 3.3.3 Then add instructions for the Agent
8212
8868
  if len(instructions) > 0:
8213
- system_message_content += "<instructions>"
8214
- if len(instructions) > 1:
8215
- for _upi in instructions:
8216
- system_message_content += f"\n- {_upi}"
8869
+ if self.use_instruction_tags:
8870
+ system_message_content += "<instructions>"
8871
+ if len(instructions) > 1:
8872
+ for _upi in instructions:
8873
+ system_message_content += f"\n- {_upi}"
8874
+ else:
8875
+ system_message_content += "\n" + instructions[0]
8876
+ system_message_content += "\n</instructions>\n\n"
8217
8877
  else:
8218
- system_message_content += "\n" + instructions[0]
8219
- system_message_content += "\n</instructions>\n\n"
8220
- # 3.3.6 Add additional information
8878
+ if len(instructions) > 1:
8879
+ for _upi in instructions:
8880
+ system_message_content += f"- {_upi}\n"
8881
+ else:
8882
+ system_message_content += instructions[0] + "\n\n"
8883
+ # 3.3.4 Add additional information
8221
8884
  if len(additional_information) > 0:
8222
8885
  system_message_content += "<additional_information>"
8223
8886
  for _ai in additional_information:
8224
8887
  system_message_content += f"\n- {_ai}"
8225
8888
  system_message_content += "\n</additional_information>\n\n"
8226
- # 3.3.7 Then add instructions for the tools
8889
+ # 3.3.5 Then add instructions for the tools
8227
8890
  if self._tool_instructions is not None:
8228
8891
  for _ti in self._tool_instructions:
8229
8892
  system_message_content += f"{_ti}\n"
@@ -8232,10 +8895,7 @@ class Agent:
8232
8895
  if self.resolve_in_context:
8233
8896
  system_message_content = self._format_message_with_state_variables(
8234
8897
  system_message_content,
8235
- user_id=user_id,
8236
- session_state=session_state,
8237
- dependencies=dependencies,
8238
- metadata=metadata,
8898
+ run_context=run_context,
8239
8899
  )
8240
8900
 
8241
8901
  # 3.3.7 Then add the expected output
@@ -8408,12 +9068,8 @@ class Agent:
8408
9068
  self,
8409
9069
  session: AgentSession,
8410
9070
  run_context: Optional[RunContext] = None,
8411
- user_id: Optional[str] = None,
8412
9071
  tools: Optional[List[Union[Function, dict]]] = None,
8413
9072
  add_session_state_to_context: Optional[bool] = None,
8414
- session_state: Optional[Dict[str, Any]] = None, # Deprecated
8415
- dependencies: Optional[Dict[str, Any]] = None, # Deprecated
8416
- metadata: Optional[Dict[str, Any]] = None, # Deprecated
8417
9073
  ) -> Optional[Message]:
8418
9074
  """Return the system message for the Agent.
8419
9075
 
@@ -8422,11 +9078,9 @@ class Agent:
8422
9078
  3. Build and return the default system message for the Agent.
8423
9079
  """
8424
9080
 
8425
- # Consider both run_context and session_state, dependencies, metadata (deprecated fields)
8426
- if run_context is not None:
8427
- session_state = run_context.session_state or session_state
8428
- dependencies = run_context.dependencies or dependencies
8429
- metadata = run_context.metadata or metadata
9081
+ # Extract values from run_context
9082
+ session_state = run_context.session_state if run_context else None
9083
+ user_id = run_context.user_id if run_context else None
8430
9084
 
8431
9085
  # Get output_schema from run_context
8432
9086
  output_schema = run_context.output_schema if run_context else None
@@ -8450,10 +9104,7 @@ class Agent:
8450
9104
  if self.resolve_in_context:
8451
9105
  sys_message_content = self._format_message_with_state_variables(
8452
9106
  sys_message_content,
8453
- user_id=user_id,
8454
- dependencies=dependencies,
8455
- metadata=metadata,
8456
- session_state=session_state,
9107
+ run_context=run_context,
8457
9108
  )
8458
9109
 
8459
9110
  # type: ignore
@@ -8532,32 +9183,23 @@ class Agent:
8532
9183
  if self.name is not None and self.add_name_to_context:
8533
9184
  additional_information.append(f"Your name is: {self.name}.")
8534
9185
 
8535
- # 3.2.5 Add information about agentic filters if enabled
8536
- if self.knowledge is not None and self.enable_agentic_knowledge_filters:
8537
- valid_filters = await self.knowledge.async_get_valid_filters()
8538
- if valid_filters:
8539
- valid_filters_str = ", ".join(valid_filters)
8540
- additional_information.append(
8541
- dedent(
8542
- f"""
8543
- The knowledge base contains documents with these metadata filters: {valid_filters_str}.
8544
- Always use filters when the user query indicates specific metadata.
8545
-
8546
- Examples:
8547
- 1. If the user asks about a specific person like "Jordan Mitchell", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like user_id>': '<valid value based on the user query>'}}.
8548
- 2. If the user asks about a specific document type like "contracts", you MUST use the search_knowledge_base tool with the filters parameter set to {{'document_type': 'contract'}}.
8549
- 4. If the user asks about a specific location like "documents from New York", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like location>': 'New York'}}.
8550
-
8551
- General Guidelines:
8552
- - Always analyze the user query to identify relevant metadata.
8553
- - Use the most specific filter(s) possible to narrow down results.
8554
- - If multiple filters are relevant, combine them in the filters parameter (e.g., {{'name': 'Jordan Mitchell', 'document_type': 'contract'}}).
8555
- - Ensure the filter keys match the valid metadata filters: {valid_filters_str}.
8556
-
8557
- You can use the search_knowledge_base tool to search the knowledge base and get the most relevant documents. Make sure to pass the filters as [Dict[str: Any]] to the tool. FOLLOW THIS STRUCTURE STRICTLY.
8558
- """
8559
- )
9186
+ # 3.2.5 Add knowledge context using protocol's build_context (async)
9187
+ if self.knowledge is not None:
9188
+ # Prefer async version if available for async databases
9189
+ abuild_context_fn = getattr(self.knowledge, "abuild_context", None)
9190
+ build_context_fn = getattr(self.knowledge, "build_context", None)
9191
+ if callable(abuild_context_fn):
9192
+ knowledge_context = await abuild_context_fn(
9193
+ enable_agentic_filters=self.enable_agentic_knowledge_filters,
9194
+ )
9195
+ if knowledge_context:
9196
+ additional_information.append(knowledge_context)
9197
+ elif callable(build_context_fn):
9198
+ knowledge_context = build_context_fn(
9199
+ enable_agentic_filters=self.enable_agentic_knowledge_filters,
8560
9200
  )
9201
+ if knowledge_context:
9202
+ additional_information.append(knowledge_context)
8561
9203
 
8562
9204
  # 3.3 Build the default system message for the Agent.
8563
9205
  system_message_content: str = ""
@@ -8567,22 +9209,29 @@ class Agent:
8567
9209
  # 3.3.2 Then add the Agent role if provided
8568
9210
  if self.role is not None:
8569
9211
  system_message_content += f"\n<your_role>\n{self.role}\n</your_role>\n\n"
8570
- # 3.3.4 Then add instructions for the Agent
9212
+ # 3.3.3 Then add instructions for the Agent
8571
9213
  if len(instructions) > 0:
8572
- system_message_content += "<instructions>"
8573
- if len(instructions) > 1:
8574
- for _upi in instructions:
8575
- system_message_content += f"\n- {_upi}"
9214
+ if self.use_instruction_tags:
9215
+ system_message_content += "<instructions>"
9216
+ if len(instructions) > 1:
9217
+ for _upi in instructions:
9218
+ system_message_content += f"\n- {_upi}"
9219
+ else:
9220
+ system_message_content += "\n" + instructions[0]
9221
+ system_message_content += "\n</instructions>\n\n"
8576
9222
  else:
8577
- system_message_content += "\n" + instructions[0]
8578
- system_message_content += "\n</instructions>\n\n"
8579
- # 3.3.6 Add additional information
9223
+ if len(instructions) > 1:
9224
+ for _upi in instructions:
9225
+ system_message_content += f"- {_upi}\n"
9226
+ else:
9227
+ system_message_content += instructions[0] + "\n\n"
9228
+ # 3.3.4 Add additional information
8580
9229
  if len(additional_information) > 0:
8581
9230
  system_message_content += "<additional_information>"
8582
9231
  for _ai in additional_information:
8583
9232
  system_message_content += f"\n- {_ai}"
8584
9233
  system_message_content += "\n</additional_information>\n\n"
8585
- # 3.3.7 Then add instructions for the tools
9234
+ # 3.3.5 Then add instructions for the tools
8586
9235
  if self._tool_instructions is not None:
8587
9236
  for _ti in self._tool_instructions:
8588
9237
  system_message_content += f"{_ti}\n"
@@ -8591,10 +9240,7 @@ class Agent:
8591
9240
  if self.resolve_in_context:
8592
9241
  system_message_content = self._format_message_with_state_variables(
8593
9242
  system_message_content,
8594
- user_id=user_id,
8595
- session_state=session_state,
8596
- dependencies=dependencies,
8597
- metadata=metadata,
9243
+ run_context=run_context,
8598
9244
  )
8599
9245
 
8600
9246
  # 3.3.7 Then add the expected output
@@ -8774,17 +9420,12 @@ class Agent:
8774
9420
  *,
8775
9421
  run_response: RunOutput,
8776
9422
  run_context: Optional[RunContext] = None,
8777
- session_state: Optional[Dict[str, Any]] = None,
8778
- dependencies: Optional[Dict[str, Any]] = None,
8779
- metadata: Optional[Dict[str, Any]] = None,
8780
- user_id: Optional[str] = None,
8781
9423
  input: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None,
8782
9424
  audio: Optional[Sequence[Audio]] = None,
8783
9425
  images: Optional[Sequence[Image]] = None,
8784
9426
  videos: Optional[Sequence[Video]] = None,
8785
9427
  files: Optional[Sequence[File]] = None,
8786
9428
  add_dependencies_to_context: Optional[bool] = None,
8787
- knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
8788
9429
  **kwargs: Any,
8789
9430
  ) -> Optional[Message]:
8790
9431
  """Return the user message for the Agent.
@@ -8793,12 +9434,9 @@ class Agent:
8793
9434
  2. If build_user_context is False or if the message is a list, return the message as is.
8794
9435
  3. Build the default user message for the Agent
8795
9436
  """
8796
- # Consider both run_context and session_state, dependencies, metadata, knowledge_filters (deprecated fields)
8797
- if run_context is not None:
8798
- session_state = run_context.session_state or session_state
8799
- dependencies = run_context.dependencies or dependencies
8800
- metadata = run_context.metadata or metadata
8801
- knowledge_filters = run_context.knowledge_filters or knowledge_filters
9437
+ # Extract values from run_context
9438
+ dependencies = run_context.dependencies if run_context else None
9439
+ knowledge_filters = run_context.knowledge_filters if run_context else None
8802
9440
  # Get references from the knowledge base to use in the user message
8803
9441
  references = None
8804
9442
 
@@ -8903,10 +9541,7 @@ class Agent:
8903
9541
  if self.resolve_in_context:
8904
9542
  user_msg_content = self._format_message_with_state_variables(
8905
9543
  user_msg_content,
8906
- user_id=user_id,
8907
- session_state=session_state,
8908
- dependencies=dependencies,
8909
- metadata=metadata,
9544
+ run_context=run_context,
8910
9545
  )
8911
9546
 
8912
9547
  # Convert to string for concatenation operations
@@ -8948,17 +9583,12 @@ class Agent:
8948
9583
  *,
8949
9584
  run_response: RunOutput,
8950
9585
  run_context: Optional[RunContext] = None,
8951
- session_state: Optional[Dict[str, Any]] = None,
8952
- dependencies: Optional[Dict[str, Any]] = None,
8953
- metadata: Optional[Dict[str, Any]] = None,
8954
- user_id: Optional[str] = None,
8955
9586
  input: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None,
8956
9587
  audio: Optional[Sequence[Audio]] = None,
8957
9588
  images: Optional[Sequence[Image]] = None,
8958
9589
  videos: Optional[Sequence[Video]] = None,
8959
9590
  files: Optional[Sequence[File]] = None,
8960
9591
  add_dependencies_to_context: Optional[bool] = None,
8961
- knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
8962
9592
  **kwargs: Any,
8963
9593
  ) -> Optional[Message]:
8964
9594
  """Return the user message for the Agent (async version).
@@ -8967,12 +9597,9 @@ class Agent:
8967
9597
  2. If build_user_context is False or if the message is a list, return the message as is.
8968
9598
  3. Build the default user message for the Agent
8969
9599
  """
8970
- # Consider both run_context and session_state, dependencies, metadata, knowledge_filters (deprecated fields)
8971
- if run_context is not None:
8972
- session_state = run_context.session_state or session_state
8973
- dependencies = run_context.dependencies or dependencies
8974
- metadata = run_context.metadata or metadata
8975
- knowledge_filters = run_context.knowledge_filters or knowledge_filters
9600
+ # Extract values from run_context
9601
+ dependencies = run_context.dependencies if run_context else None
9602
+ knowledge_filters = run_context.knowledge_filters if run_context else None
8976
9603
  # Get references from the knowledge base to use in the user message
8977
9604
  references = None
8978
9605
 
@@ -9077,10 +9704,7 @@ class Agent:
9077
9704
  if self.resolve_in_context:
9078
9705
  user_msg_content = self._format_message_with_state_variables(
9079
9706
  user_msg_content,
9080
- user_id=user_id,
9081
- session_state=session_state,
9082
- dependencies=dependencies,
9083
- metadata=metadata,
9707
+ run_context=run_context,
9084
9708
  )
9085
9709
 
9086
9710
  # Convert to string for concatenation operations
@@ -9166,10 +9790,6 @@ class Agent:
9166
9790
  system_message = self.get_system_message(
9167
9791
  session=session,
9168
9792
  run_context=run_context,
9169
- session_state=run_context.session_state,
9170
- dependencies=run_context.dependencies,
9171
- metadata=run_context.metadata,
9172
- user_id=user_id,
9173
9793
  tools=tools,
9174
9794
  add_session_state_to_context=add_session_state_to_context,
9175
9795
  )
@@ -9366,12 +9986,6 @@ class Agent:
9366
9986
  )
9367
9987
  """
9368
9988
 
9369
- # Consider both run_context and session_state, dependencies, metadata (deprecated fields)
9370
- if run_context is not None:
9371
- session_state = run_context.session_state or session_state
9372
- dependencies = run_context.dependencies or dependencies
9373
- metadata = run_context.metadata or metadata
9374
-
9375
9989
  # Initialize the RunMessages object (no media here - that's in RunInput now)
9376
9990
  run_messages = RunMessages()
9377
9991
 
@@ -9379,11 +9993,7 @@ class Agent:
9379
9993
  system_message = await self.aget_system_message(
9380
9994
  session=session,
9381
9995
  run_context=run_context,
9382
- session_state=session_state,
9383
- user_id=user_id,
9384
9996
  tools=tools,
9385
- dependencies=dependencies,
9386
- metadata=metadata,
9387
9997
  add_session_state_to_context=add_session_state_to_context,
9388
9998
  )
9389
9999
  if system_message is not None:
@@ -9469,15 +10079,11 @@ class Agent:
9469
10079
  user_message = await self._aget_user_message(
9470
10080
  run_response=run_response,
9471
10081
  run_context=run_context,
9472
- session_state=session_state,
9473
- dependencies=dependencies,
9474
- metadata=metadata,
9475
10082
  input=input,
9476
10083
  audio=audio,
9477
10084
  images=images,
9478
10085
  videos=videos,
9479
10086
  files=files,
9480
- knowledge_filters=knowledge_filters,
9481
10087
  add_dependencies_to_context=add_dependencies_to_context,
9482
10088
  **kwargs,
9483
10089
  )
@@ -9660,7 +10266,7 @@ class Agent:
9660
10266
  dependencies = run_context.dependencies if run_context else None
9661
10267
 
9662
10268
  if num_documents is None and self.knowledge is not None:
9663
- num_documents = self.knowledge.max_results
10269
+ num_documents = getattr(self.knowledge, "max_results", None)
9664
10270
  # Validate the filters against known valid filter keys
9665
10271
  if self.knowledge is not None and filters is not None:
9666
10272
  if validate_filters:
@@ -9701,22 +10307,22 @@ class Agent:
9701
10307
  log_warning(f"Knowledge retriever failed: {e}")
9702
10308
  raise e
9703
10309
 
9704
- # Use knowledge base search
10310
+ # Use knowledge protocol's retrieve method
9705
10311
  try:
9706
- if self.knowledge is None or (
9707
- (getattr(self.knowledge, "vector_db", None)) is None
9708
- and getattr(self.knowledge, "knowledge_retriever", None) is None
9709
- ):
10312
+ if self.knowledge is None:
10313
+ return None
10314
+
10315
+ # Use protocol retrieve() method if available
10316
+ retrieve_fn = getattr(self.knowledge, "retrieve", None)
10317
+ if not callable(retrieve_fn):
10318
+ log_debug("Knowledge does not implement retrieve()")
9710
10319
  return None
9711
10320
 
9712
10321
  if num_documents is None:
9713
- if isinstance(self.knowledge, Knowledge):
9714
- num_documents = self.knowledge.max_results
10322
+ num_documents = getattr(self.knowledge, "max_results", 10)
9715
10323
 
9716
- log_debug(f"Searching knowledge base with filters: {filters}")
9717
- relevant_docs: List[Document] = self.knowledge.search(
9718
- query=query, max_results=num_documents, filters=filters
9719
- )
10324
+ log_debug(f"Retrieving from knowledge base with filters: {filters}")
10325
+ relevant_docs: List[Document] = retrieve_fn(query=query, max_results=num_documents, filters=filters)
9720
10326
 
9721
10327
  if not relevant_docs or len(relevant_docs) == 0:
9722
10328
  log_debug("No relevant documents found for query")
@@ -9724,7 +10330,7 @@ class Agent:
9724
10330
 
9725
10331
  return [doc.to_dict() for doc in relevant_docs]
9726
10332
  except Exception as e:
9727
- log_warning(f"Error searching knowledge base: {e}")
10333
+ log_warning(f"Error retrieving from knowledge base: {e}")
9728
10334
  raise e
9729
10335
 
9730
10336
  async def aget_relevant_docs_from_knowledge(
@@ -9743,12 +10349,12 @@ class Agent:
9743
10349
  dependencies = run_context.dependencies if run_context else None
9744
10350
 
9745
10351
  if num_documents is None and self.knowledge is not None:
9746
- num_documents = self.knowledge.max_results
10352
+ num_documents = getattr(self.knowledge, "max_results", None)
9747
10353
 
9748
10354
  # Validate the filters against known valid filter keys
9749
10355
  if self.knowledge is not None and filters is not None:
9750
10356
  if validate_filters:
9751
- valid_filters, invalid_keys = await self.knowledge.async_validate_filters(filters) # type: ignore
10357
+ valid_filters, invalid_keys = await self.knowledge.avalidate_filters(filters) # type: ignore
9752
10358
 
9753
10359
  # Warn about invalid filter keys
9754
10360
  if invalid_keys: # type: ignore
@@ -9789,21 +10395,32 @@ class Agent:
9789
10395
  log_warning(f"Knowledge retriever failed: {e}")
9790
10396
  raise e
9791
10397
 
9792
- # Use knowledge base search
10398
+ # Use knowledge protocol's retrieve method
9793
10399
  try:
9794
- if self.knowledge is None or (
9795
- getattr(self.knowledge, "vector_db", None) is None
9796
- and getattr(self.knowledge, "knowledge_retriever", None) is None
9797
- ):
10400
+ if self.knowledge is None:
10401
+ return None
10402
+
10403
+ # Use protocol aretrieve() or retrieve() method if available
10404
+ aretrieve_fn = getattr(self.knowledge, "aretrieve", None)
10405
+ retrieve_fn = getattr(self.knowledge, "retrieve", None)
10406
+
10407
+ if not callable(aretrieve_fn) and not callable(retrieve_fn):
10408
+ log_debug("Knowledge does not implement retrieve()")
9798
10409
  return None
9799
10410
 
9800
10411
  if num_documents is None:
9801
- num_documents = self.knowledge.max_results
10412
+ num_documents = getattr(self.knowledge, "max_results", 10)
9802
10413
 
9803
- log_debug(f"Searching knowledge base with filters: {filters}")
9804
- relevant_docs: List[Document] = await self.knowledge.async_search(
9805
- query=query, max_results=num_documents, filters=filters
9806
- )
10414
+ log_debug(f"Retrieving from knowledge base with filters: {filters}")
10415
+
10416
+ if callable(aretrieve_fn):
10417
+ relevant_docs: List[Document] = await aretrieve_fn(
10418
+ query=query, max_results=num_documents, filters=filters
10419
+ )
10420
+ elif callable(retrieve_fn):
10421
+ relevant_docs = retrieve_fn(query=query, max_results=num_documents, filters=filters)
10422
+ else:
10423
+ return None
9807
10424
 
9808
10425
  if not relevant_docs or len(relevant_docs) == 0:
9809
10426
  log_debug("No relevant documents found for query")
@@ -9811,7 +10428,7 @@ class Agent:
9811
10428
 
9812
10429
  return [doc.to_dict() for doc in relevant_docs]
9813
10430
  except Exception as e:
9814
- log_warning(f"Error searching knowledge base: {e}")
10431
+ log_warning(f"Error retrieving from knowledge base: {e}")
9815
10432
  raise e
9816
10433
 
9817
10434
  def _convert_documents_to_string(self, docs: List[Union[Dict[str, Any], str]]) -> str:
@@ -9877,18 +10494,30 @@ class Agent:
9877
10494
  fields_for_new_agent: Dict[str, Any] = {}
9878
10495
 
9879
10496
  for f in fields(self):
10497
+ # Skip private fields (not part of __init__ signature)
10498
+ if f.name.startswith("_"):
10499
+ continue
10500
+
9880
10501
  field_value = getattr(self, f.name)
9881
10502
  if field_value is not None:
9882
- fields_for_new_agent[f.name] = self._deep_copy_field(f.name, field_value)
10503
+ try:
10504
+ fields_for_new_agent[f.name] = self._deep_copy_field(f.name, field_value)
10505
+ except Exception as e:
10506
+ log_warning(f"Failed to deep copy field '{f.name}': {e}. Using original value.")
10507
+ fields_for_new_agent[f.name] = field_value
9883
10508
 
9884
10509
  # Update fields if provided
9885
10510
  if update:
9886
10511
  fields_for_new_agent.update(update)
9887
10512
 
9888
10513
  # Create a new Agent
9889
- new_agent = self.__class__(**fields_for_new_agent)
9890
- log_debug(f"Created new {self.__class__.__name__}")
9891
- return new_agent
10514
+ try:
10515
+ new_agent = self.__class__(**fields_for_new_agent)
10516
+ log_debug(f"Created new {self.__class__.__name__}")
10517
+ return new_agent
10518
+ except Exception as e:
10519
+ log_error(f"Failed to create deep copy of {self.__class__.__name__}: {e}")
10520
+ raise
9892
10521
 
9893
10522
  def _deep_copy_field(self, field_name: str, field_value: Any) -> Any:
9894
10523
  """Helper method to deep copy a field based on its type."""
@@ -9898,19 +10527,52 @@ class Agent:
9898
10527
  if field_name == "reasoning_agent":
9899
10528
  return field_value.deep_copy()
9900
10529
 
9901
- # For storage, model and reasoning_model, use a deep copy
9902
- elif field_name in ("db", "model", "reasoning_model"):
10530
+ # For tools, share MCP tools but copy others
10531
+ if field_name == "tools" and field_value is not None:
9903
10532
  try:
9904
- return deepcopy(field_value)
9905
- except Exception:
9906
- try:
9907
- return copy(field_value)
9908
- except Exception as e:
9909
- log_warning(f"Failed to copy field: {field_name} - {e}")
9910
- return field_value
10533
+ copied_tools = []
10534
+ for tool in field_value:
10535
+ try:
10536
+ # Share MCP tools (they maintain server connections)
10537
+ is_mcp_tool = hasattr(type(tool), "__mro__") and any(
10538
+ c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__
10539
+ )
10540
+ if is_mcp_tool:
10541
+ copied_tools.append(tool)
10542
+ else:
10543
+ try:
10544
+ copied_tools.append(deepcopy(tool))
10545
+ except Exception:
10546
+ # Tool can't be deep copied, share by reference
10547
+ copied_tools.append(tool)
10548
+ except Exception:
10549
+ # MCP detection failed, share tool by reference to be safe
10550
+ copied_tools.append(tool)
10551
+ return copied_tools
10552
+ except Exception as e:
10553
+ # If entire tools processing fails, log and return original list
10554
+ log_warning(f"Failed to process tools for deep copy: {e}")
10555
+ return field_value
10556
+
10557
+ # Share heavy resources - these maintain connections/pools that shouldn't be duplicated
10558
+ if field_name in (
10559
+ "db",
10560
+ "model",
10561
+ "reasoning_model",
10562
+ "knowledge",
10563
+ "memory_manager",
10564
+ "parser_model",
10565
+ "output_model",
10566
+ "session_summary_manager",
10567
+ "culture_manager",
10568
+ "compression_manager",
10569
+ "learning",
10570
+ "skills",
10571
+ ):
10572
+ return field_value
9911
10573
 
9912
10574
  # For compound types, attempt a deep copy
9913
- elif isinstance(field_value, (list, dict, set)):
10575
+ if isinstance(field_value, (list, dict, set)):
9914
10576
  try:
9915
10577
  return deepcopy(field_value)
9916
10578
  except Exception:
@@ -9921,7 +10583,7 @@ class Agent:
9921
10583
  return field_value
9922
10584
 
9923
10585
  # For pydantic models, attempt a model_copy
9924
- elif isinstance(field_value, BaseModel):
10586
+ if isinstance(field_value, BaseModel):
9925
10587
  try:
9926
10588
  return field_value.model_copy(deep=True)
9927
10589
  except Exception:
@@ -9933,8 +10595,6 @@ class Agent:
9933
10595
 
9934
10596
  # For other types, attempt a shallow copy first
9935
10597
  try:
9936
- from copy import copy
9937
-
9938
10598
  return copy(field_value)
9939
10599
  except Exception:
9940
10600
  # If copy fails, return as is
@@ -9997,40 +10657,56 @@ class Agent:
9997
10657
  # Reasoning
9998
10658
  ###########################################################################
9999
10659
 
10000
- def _handle_reasoning(self, run_response: RunOutput, run_messages: RunMessages) -> None:
10660
+ def _handle_reasoning(
10661
+ self, run_response: RunOutput, run_messages: RunMessages, run_context: Optional[RunContext] = None
10662
+ ) -> None:
10001
10663
  if self.reasoning or self.reasoning_model is not None:
10002
10664
  reasoning_generator = self._reason(
10003
- run_response=run_response, run_messages=run_messages, stream_events=False
10665
+ run_response=run_response, run_messages=run_messages, run_context=run_context, stream_events=False
10004
10666
  )
10005
10667
 
10006
10668
  # Consume the generator without yielding
10007
10669
  deque(reasoning_generator, maxlen=0)
10008
10670
 
10009
10671
  def _handle_reasoning_stream(
10010
- self, run_response: RunOutput, run_messages: RunMessages, stream_events: Optional[bool] = None
10672
+ self,
10673
+ run_response: RunOutput,
10674
+ run_messages: RunMessages,
10675
+ run_context: Optional[RunContext] = None,
10676
+ stream_events: Optional[bool] = None,
10011
10677
  ) -> Iterator[RunOutputEvent]:
10012
10678
  if self.reasoning or self.reasoning_model is not None:
10013
10679
  reasoning_generator = self._reason(
10014
10680
  run_response=run_response,
10015
10681
  run_messages=run_messages,
10682
+ run_context=run_context,
10016
10683
  stream_events=stream_events,
10017
10684
  )
10018
10685
  yield from reasoning_generator
10019
10686
 
10020
- async def _ahandle_reasoning(self, run_response: RunOutput, run_messages: RunMessages) -> None:
10687
+ async def _ahandle_reasoning(
10688
+ self, run_response: RunOutput, run_messages: RunMessages, run_context: Optional[RunContext] = None
10689
+ ) -> None:
10021
10690
  if self.reasoning or self.reasoning_model is not None:
10022
- reason_generator = self._areason(run_response=run_response, run_messages=run_messages, stream_events=False)
10691
+ reason_generator = self._areason(
10692
+ run_response=run_response, run_messages=run_messages, run_context=run_context, stream_events=False
10693
+ )
10023
10694
  # Consume the generator without yielding
10024
10695
  async for _ in reason_generator: # type: ignore
10025
10696
  pass
10026
10697
 
10027
10698
  async def _ahandle_reasoning_stream(
10028
- self, run_response: RunOutput, run_messages: RunMessages, stream_events: Optional[bool] = None
10699
+ self,
10700
+ run_response: RunOutput,
10701
+ run_messages: RunMessages,
10702
+ run_context: Optional[RunContext] = None,
10703
+ stream_events: Optional[bool] = None,
10029
10704
  ) -> AsyncIterator[RunOutputEvent]:
10030
10705
  if self.reasoning or self.reasoning_model is not None:
10031
10706
  reason_generator = self._areason(
10032
10707
  run_response=run_response,
10033
10708
  run_messages=run_messages,
10709
+ run_context=run_context,
10034
10710
  stream_events=stream_events,
10035
10711
  )
10036
10712
  async for item in reason_generator: # type: ignore
@@ -10142,7 +10818,11 @@ class Agent:
10142
10818
  log_warning(f"Reasoning error. {event.error}, continuing regular session...")
10143
10819
 
10144
10820
  def _reason(
10145
- self, run_response: RunOutput, run_messages: RunMessages, stream_events: Optional[bool] = None
10821
+ self,
10822
+ run_response: RunOutput,
10823
+ run_messages: RunMessages,
10824
+ run_context: Optional[RunContext] = None,
10825
+ stream_events: Optional[bool] = None,
10146
10826
  ) -> Iterator[RunOutputEvent]:
10147
10827
  """
10148
10828
  Run reasoning using the ReasoningManager.
@@ -10172,9 +10852,7 @@ class Agent:
10172
10852
  telemetry=self.telemetry,
10173
10853
  debug_mode=self.debug_mode,
10174
10854
  debug_level=self.debug_level,
10175
- session_state=self.session_state,
10176
- dependencies=self.dependencies,
10177
- metadata=self.metadata,
10855
+ run_context=run_context,
10178
10856
  )
10179
10857
  )
10180
10858
 
@@ -10183,7 +10861,11 @@ class Agent:
10183
10861
  yield from self._handle_reasoning_event(event, run_response, stream_events)
10184
10862
 
10185
10863
  async def _areason(
10186
- self, run_response: RunOutput, run_messages: RunMessages, stream_events: Optional[bool] = None
10864
+ self,
10865
+ run_response: RunOutput,
10866
+ run_messages: RunMessages,
10867
+ run_context: Optional[RunContext] = None,
10868
+ stream_events: Optional[bool] = None,
10187
10869
  ) -> Any:
10188
10870
  """
10189
10871
  Run reasoning asynchronously using the ReasoningManager.
@@ -10213,9 +10895,7 @@ class Agent:
10213
10895
  telemetry=self.telemetry,
10214
10896
  debug_mode=self.debug_mode,
10215
10897
  debug_level=self.debug_level,
10216
- session_state=self.session_state,
10217
- dependencies=self.dependencies,
10218
- metadata=self.metadata,
10898
+ run_context=run_context,
10219
10899
  )
10220
10900
  )
10221
10901
 
@@ -10620,6 +11300,118 @@ class Agent:
10620
11300
  name="create_or_update_cultural_knowledge",
10621
11301
  )
10622
11302
 
11303
+ def _create_knowledge_retriever_search_tool(
11304
+ self,
11305
+ run_response: Optional[RunOutput] = None,
11306
+ run_context: Optional[RunContext] = None,
11307
+ async_mode: bool = False,
11308
+ ) -> Function:
11309
+ """Create a search_knowledge_base tool using the custom knowledge_retriever.
11310
+
11311
+ This allows agents to use a custom retriever function without needing
11312
+ a full Knowledge instance. The retriever is wrapped as a tool the agent can call.
11313
+ """
11314
+ from agno.models.message import MessageReferences
11315
+ from agno.utils.timer import Timer
11316
+
11317
+ def search_knowledge_base(query: str) -> str:
11318
+ """Use this function to search the knowledge base for information about a query.
11319
+
11320
+ Args:
11321
+ query: The query to search for.
11322
+
11323
+ Returns:
11324
+ str: A string containing the response from the knowledge base.
11325
+ """
11326
+ retrieval_timer = Timer()
11327
+ retrieval_timer.start()
11328
+
11329
+ try:
11330
+ docs = self.get_relevant_docs_from_knowledge(
11331
+ query=query,
11332
+ run_context=run_context,
11333
+ )
11334
+ except Exception as e:
11335
+ log_warning(f"Knowledge retriever failed: {e}")
11336
+ return f"Error searching knowledge base: {e}"
11337
+
11338
+ if run_response is not None and docs:
11339
+ references = MessageReferences(
11340
+ query=query,
11341
+ references=docs,
11342
+ time=round(retrieval_timer.elapsed, 4),
11343
+ )
11344
+ if run_response.references is None:
11345
+ run_response.references = []
11346
+ run_response.references.append(references)
11347
+
11348
+ retrieval_timer.stop()
11349
+ log_debug(f"Time to get references: {retrieval_timer.elapsed:.4f}s")
11350
+
11351
+ if not docs:
11352
+ return "No documents found"
11353
+
11354
+ # Format results for the agent
11355
+ if self.references_format == "json":
11356
+ import json
11357
+
11358
+ return json.dumps(docs, indent=2, default=str)
11359
+ else:
11360
+ import yaml
11361
+
11362
+ return yaml.dump(docs, default_flow_style=False)
11363
+
11364
+ async def asearch_knowledge_base(query: str) -> str:
11365
+ """Use this function to search the knowledge base for information about a query.
11366
+
11367
+ Args:
11368
+ query: The query to search for.
11369
+
11370
+ Returns:
11371
+ str: A string containing the response from the knowledge base.
11372
+ """
11373
+ retrieval_timer = Timer()
11374
+ retrieval_timer.start()
11375
+
11376
+ try:
11377
+ docs = await self.aget_relevant_docs_from_knowledge(
11378
+ query=query,
11379
+ run_context=run_context,
11380
+ )
11381
+ except Exception as e:
11382
+ log_warning(f"Knowledge retriever failed: {e}")
11383
+ return f"Error searching knowledge base: {e}"
11384
+
11385
+ if run_response is not None and docs:
11386
+ references = MessageReferences(
11387
+ query=query,
11388
+ references=docs,
11389
+ time=round(retrieval_timer.elapsed, 4),
11390
+ )
11391
+ if run_response.references is None:
11392
+ run_response.references = []
11393
+ run_response.references.append(references)
11394
+
11395
+ retrieval_timer.stop()
11396
+ log_debug(f"Time to get references: {retrieval_timer.elapsed:.4f}s")
11397
+
11398
+ if not docs:
11399
+ return "No documents found"
11400
+
11401
+ # Format results for the agent
11402
+ if self.references_format == "json":
11403
+ import json
11404
+
11405
+ return json.dumps(docs, indent=2, default=str)
11406
+ else:
11407
+ import yaml
11408
+
11409
+ return yaml.dump(docs, default_flow_style=False)
11410
+
11411
+ if async_mode:
11412
+ return Function.from_callable(asearch_knowledge_base, name="search_knowledge_base")
11413
+ return Function.from_callable(search_knowledge_base, name="search_knowledge_base")
11414
+
10623
11415
  def _get_chat_history_function(self, session: AgentSession) -> Callable:
10624
11416
  def get_chat_history(num_chats: Optional[int] = None) -> str:
10625
11417
  """Use this function to get the chat history between the user and agent.
@@ -10879,12 +11671,18 @@ class Agent:
10879
11671
 
10880
11672
  if self.knowledge is None:
10881
11673
  return "Knowledge not available"
11674
+
11675
+ # Check if knowledge supports insert
11676
+ insert_fn = getattr(self.knowledge, "insert", None)
11677
+ if not callable(insert_fn):
11678
+ return "Knowledge does not support insert"
11679
+
10882
11680
  document_name = query.replace(" ", "_").replace("?", "").replace("!", "").replace(".", "")
10883
11681
  document_content = json.dumps({"query": query, "result": result})
10884
11682
  log_info(f"Adding document to Knowledge: {document_name}: {document_content}")
10885
11683
  from agno.knowledge.reader.text_reader import TextReader
10886
11684
 
10887
- self.knowledge.add_content(name=document_name, text_content=document_content, reader=TextReader())
11685
+ insert_fn(name=document_name, text_content=document_content, reader=TextReader())
10888
11686
  return "Successfully added to knowledge base"
10889
11687
 
10890
11688
  def _get_previous_sessions_messages_function(
@@ -11573,7 +12371,7 @@ class Agent:
11573
12371
  "parser_model": self.parser_model.to_dict() if self.parser_model else None,
11574
12372
  "output_model": self.output_model.to_dict() if self.output_model else None,
11575
12373
  "has_tools": self.tools is not None,
11576
- "has_memory": self.enable_user_memories is True
12374
+ "has_memory": self.update_memory_on_run is True
11577
12375
  or self.enable_agentic_memory is True
11578
12376
  or self.memory_manager is not None,
11579
12377
  "has_learnings": self._learning is not None,
@@ -11627,3 +12425,75 @@ class Agent:
11627
12425
 
11628
12426
  except Exception as e:
11629
12427
  log_debug(f"Could not create Agent run telemetry event: {e}")
12428
+
12429
+
12430
+ def get_agent_by_id(
12431
+ db: "BaseDb",
12432
+ id: str,
12433
+ version: Optional[int] = None,
12434
+ label: Optional[str] = None,
12435
+ registry: Optional["Registry"] = None,
12436
+ ) -> Optional["Agent"]:
12437
+ """
12438
+ Get an Agent by id from the database (new entities/configs schema).
12439
+
12440
+ Resolution order:
12441
+ - if label is provided: load that labeled version
12442
+ - else: load component.current_version
12443
+
12444
+ Args:
12445
+ db: Database handle.
12446
+ id: Agent entity_id.
12447
+ label: Optional label.
12448
+ registry: Optional Registry for reconstructing unserializable components.
12449
+
12450
+ Returns:
12451
+ Agent instance or None.
12452
+ """
12453
+ try:
12454
+ row = db.get_config(component_id=id, label=label, version=version)
12455
+ if row is None:
12456
+ return None
12457
+
12458
+ cfg = row.get("config") if isinstance(row, dict) else None
12459
+ if cfg is None:
12460
+ raise ValueError(f"Invalid config found for agent {id}")
12461
+
12462
+ agent = Agent.from_dict(cfg, registry=registry)
12463
+ agent.id = id
12464
+
12465
+ return agent
12466
+
12467
+ except Exception as e:
12468
+ log_error(f"Error loading Agent {id} from database: {e}")
12469
+ return None
12470
+
12471
+
12472
+ def get_agents(
12473
+ db: "BaseDb",
12474
+ registry: Optional["Registry"] = None,
12475
+ ) -> List["Agent"]:
12476
+ """
12477
+ Get all agents from the database.
12478
+ """
12479
+ agents: List[Agent] = []
12480
+ try:
12481
+ components, _ = db.list_components(component_type=ComponentType.AGENT)
12482
+ for component in components:
12483
+ config = db.get_config(component_id=component["component_id"])
12484
+ if config is not None:
12485
+ agent_config = config.get("config")
12486
+ if agent_config is not None:
12487
+ component_id = component["component_id"]
12488
+ if "id" not in agent_config:
12489
+ agent_config["id"] = component_id
12490
+ agent = Agent.from_dict(agent_config, registry=registry)
12491
+ # Ensure agent.id is set to the component_id (the id used to load the agent)
12492
+ # This ensures events use the correct agent_id
12493
+ agent.id = component_id
12494
+ agents.append(agent)
12495
+ return agents
12496
+
12497
+ except Exception as e:
12498
+ log_error(f"Error loading Agents from database: {e}")
12499
+ return []