agno 2.3.25__py3-none-any.whl → 2.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (128):
  1. agno/agent/__init__.py +4 -0
  2. agno/agent/agent.py +1428 -558
  3. agno/agent/remote.py +13 -0
  4. agno/db/base.py +339 -0
  5. agno/db/postgres/async_postgres.py +116 -12
  6. agno/db/postgres/postgres.py +1229 -25
  7. agno/db/postgres/schemas.py +48 -1
  8. agno/db/sqlite/async_sqlite.py +119 -4
  9. agno/db/sqlite/schemas.py +51 -0
  10. agno/db/sqlite/sqlite.py +1173 -13
  11. agno/db/utils.py +37 -1
  12. agno/knowledge/__init__.py +4 -0
  13. agno/knowledge/chunking/code.py +1 -1
  14. agno/knowledge/chunking/semantic.py +1 -1
  15. agno/knowledge/chunking/strategy.py +4 -0
  16. agno/knowledge/filesystem.py +412 -0
  17. agno/knowledge/knowledge.py +2767 -2254
  18. agno/knowledge/protocol.py +134 -0
  19. agno/knowledge/reader/arxiv_reader.py +2 -2
  20. agno/knowledge/reader/base.py +9 -7
  21. agno/knowledge/reader/csv_reader.py +5 -5
  22. agno/knowledge/reader/docx_reader.py +2 -2
  23. agno/knowledge/reader/field_labeled_csv_reader.py +2 -2
  24. agno/knowledge/reader/firecrawl_reader.py +2 -2
  25. agno/knowledge/reader/json_reader.py +2 -2
  26. agno/knowledge/reader/markdown_reader.py +2 -2
  27. agno/knowledge/reader/pdf_reader.py +5 -4
  28. agno/knowledge/reader/pptx_reader.py +2 -2
  29. agno/knowledge/reader/reader_factory.py +110 -0
  30. agno/knowledge/reader/s3_reader.py +2 -2
  31. agno/knowledge/reader/tavily_reader.py +2 -2
  32. agno/knowledge/reader/text_reader.py +2 -2
  33. agno/knowledge/reader/web_search_reader.py +2 -2
  34. agno/knowledge/reader/website_reader.py +5 -3
  35. agno/knowledge/reader/wikipedia_reader.py +2 -2
  36. agno/knowledge/reader/youtube_reader.py +2 -2
  37. agno/knowledge/utils.py +37 -29
  38. agno/learn/__init__.py +6 -0
  39. agno/learn/machine.py +35 -0
  40. agno/learn/schemas.py +82 -11
  41. agno/learn/stores/__init__.py +3 -0
  42. agno/learn/stores/decision_log.py +1156 -0
  43. agno/learn/stores/learned_knowledge.py +6 -6
  44. agno/models/anthropic/claude.py +24 -0
  45. agno/models/aws/bedrock.py +20 -0
  46. agno/models/base.py +48 -4
  47. agno/models/cohere/chat.py +25 -0
  48. agno/models/google/gemini.py +50 -5
  49. agno/models/litellm/chat.py +38 -0
  50. agno/models/openai/chat.py +7 -0
  51. agno/models/openrouter/openrouter.py +46 -0
  52. agno/models/response.py +16 -0
  53. agno/os/app.py +83 -44
  54. agno/os/middleware/__init__.py +2 -0
  55. agno/os/middleware/trailing_slash.py +27 -0
  56. agno/os/router.py +1 -0
  57. agno/os/routers/agents/router.py +29 -16
  58. agno/os/routers/agents/schema.py +6 -4
  59. agno/os/routers/components/__init__.py +3 -0
  60. agno/os/routers/components/components.py +466 -0
  61. agno/os/routers/evals/schemas.py +4 -3
  62. agno/os/routers/health.py +3 -3
  63. agno/os/routers/knowledge/knowledge.py +3 -3
  64. agno/os/routers/memory/schemas.py +4 -2
  65. agno/os/routers/metrics/metrics.py +9 -11
  66. agno/os/routers/metrics/schemas.py +10 -6
  67. agno/os/routers/registry/__init__.py +3 -0
  68. agno/os/routers/registry/registry.py +337 -0
  69. agno/os/routers/teams/router.py +20 -8
  70. agno/os/routers/teams/schema.py +6 -4
  71. agno/os/routers/traces/traces.py +5 -5
  72. agno/os/routers/workflows/router.py +38 -11
  73. agno/os/routers/workflows/schema.py +1 -1
  74. agno/os/schema.py +92 -26
  75. agno/os/utils.py +133 -16
  76. agno/reasoning/anthropic.py +2 -2
  77. agno/reasoning/azure_ai_foundry.py +2 -2
  78. agno/reasoning/deepseek.py +2 -2
  79. agno/reasoning/default.py +6 -7
  80. agno/reasoning/gemini.py +2 -2
  81. agno/reasoning/helpers.py +6 -7
  82. agno/reasoning/manager.py +4 -10
  83. agno/reasoning/ollama.py +2 -2
  84. agno/reasoning/openai.py +2 -2
  85. agno/reasoning/vertexai.py +2 -2
  86. agno/registry/__init__.py +3 -0
  87. agno/registry/registry.py +68 -0
  88. agno/run/agent.py +57 -0
  89. agno/run/base.py +7 -0
  90. agno/run/team.py +57 -0
  91. agno/skills/agent_skills.py +10 -3
  92. agno/team/__init__.py +3 -1
  93. agno/team/team.py +1276 -326
  94. agno/tools/duckduckgo.py +25 -71
  95. agno/tools/exa.py +0 -21
  96. agno/tools/function.py +35 -83
  97. agno/tools/knowledge.py +9 -4
  98. agno/tools/mem0.py +11 -10
  99. agno/tools/memory.py +47 -46
  100. agno/tools/parallel.py +0 -7
  101. agno/tools/reasoning.py +30 -23
  102. agno/tools/tavily.py +4 -1
  103. agno/tools/websearch.py +93 -0
  104. agno/tools/website.py +1 -1
  105. agno/tools/wikipedia.py +1 -1
  106. agno/tools/workflow.py +48 -47
  107. agno/utils/agent.py +42 -5
  108. agno/utils/events.py +160 -2
  109. agno/utils/print_response/agent.py +0 -31
  110. agno/utils/print_response/team.py +0 -2
  111. agno/utils/print_response/workflow.py +0 -2
  112. agno/utils/team.py +61 -11
  113. agno/vectordb/lancedb/lance_db.py +4 -1
  114. agno/vectordb/mongodb/mongodb.py +1 -1
  115. agno/vectordb/qdrant/qdrant.py +4 -4
  116. agno/workflow/__init__.py +3 -1
  117. agno/workflow/condition.py +0 -21
  118. agno/workflow/loop.py +0 -21
  119. agno/workflow/parallel.py +0 -21
  120. agno/workflow/router.py +0 -21
  121. agno/workflow/step.py +117 -24
  122. agno/workflow/steps.py +0 -21
  123. agno/workflow/workflow.py +625 -63
  124. {agno-2.3.25.dist-info → agno-2.4.0.dist-info}/METADATA +46 -76
  125. {agno-2.3.25.dist-info → agno-2.4.0.dist-info}/RECORD +128 -117
  126. {agno-2.3.25.dist-info → agno-2.4.0.dist-info}/WHEEL +0 -0
  127. {agno-2.3.25.dist-info → agno-2.4.0.dist-info}/licenses/LICENSE +0 -0
  128. {agno-2.3.25.dist-info → agno-2.4.0.dist-info}/top_level.txt +0 -0
agno/tools/workflow.py CHANGED
@@ -4,6 +4,7 @@ from typing import Any, Dict, Optional
4
4
 
5
5
  from pydantic import BaseModel, Field
6
6
 
7
+ from agno.run import RunContext
7
8
  from agno.tools import Toolkit
8
9
  from agno.utils.log import log_debug, log_error
9
10
  from agno.workflow.workflow import Workflow, WorkflowRunOutput
@@ -65,7 +66,7 @@ class WorkflowTools(Toolkit):
65
66
  else:
66
67
  self.register(self.analyze, name="analyze")
67
68
 
68
- def think(self, session_state: Dict[str, Any], thought: str) -> str:
69
+ def think(self, run_context: RunContext, thought: str) -> str:
69
70
  """Use this tool as a scratchpad to reason about the workflow execution, refine your approach, brainstorm workflow inputs, or revise your plan.
70
71
  Call `Think` whenever you need to figure out what to do next, analyze the user's requirements, plan workflow inputs, or decide on execution strategy.
71
72
  You should use this tool as frequently as needed.
@@ -76,14 +77,14 @@ class WorkflowTools(Toolkit):
76
77
  log_debug(f"Workflow Thought: {thought}")
77
78
 
78
79
  # Add the thought to the session state
79
- if session_state is None:
80
- session_state = {}
81
- if "workflow_thoughts" not in session_state:
82
- session_state["workflow_thoughts"] = []
83
- session_state["workflow_thoughts"].append(thought)
80
+ if run_context.session_state is None:
81
+ run_context.session_state = {}
82
+ if "workflow_thoughts" not in run_context.session_state:
83
+ run_context.session_state["workflow_thoughts"] = []
84
+ run_context.session_state["workflow_thoughts"].append(thought)
84
85
 
85
86
  # Return the full log of thoughts and the new thought
86
- thoughts = "\n".join([f"- {t}" for t in session_state["workflow_thoughts"]])
87
+ thoughts = "\n".join([f"- {t}" for t in run_context.session_state["workflow_thoughts"]])
87
88
  formatted_thoughts = dedent(
88
89
  f"""Workflow Thoughts:
89
90
  {thoughts}
@@ -94,7 +95,7 @@ class WorkflowTools(Toolkit):
94
95
  log_error(f"Error recording workflow thought: {e}")
95
96
  return f"Error recording workflow thought: {e}"
96
97
 
97
- async def async_think(self, session_state: Dict[str, Any], thought: str) -> str:
98
+ async def async_think(self, run_context: RunContext, thought: str) -> str:
98
99
  """Use this tool as a scratchpad to reason about the workflow execution, refine your approach, brainstorm workflow inputs, or revise your plan.
99
100
  Call `Think` whenever you need to figure out what to do next, analyze the user's requirements, plan workflow inputs, or decide on execution strategy.
100
101
  You should use this tool as frequently as needed.
@@ -105,14 +106,14 @@ class WorkflowTools(Toolkit):
105
106
  log_debug(f"Workflow Thought: {thought}")
106
107
 
107
108
  # Add the thought to the session state
108
- if session_state is None:
109
- session_state = {}
110
- if "workflow_thoughts" not in session_state:
111
- session_state["workflow_thoughts"] = []
112
- session_state["workflow_thoughts"].append(thought)
109
+ if run_context.session_state is None:
110
+ run_context.session_state = {}
111
+ if "workflow_thoughts" not in run_context.session_state:
112
+ run_context.session_state["workflow_thoughts"] = []
113
+ run_context.session_state["workflow_thoughts"].append(thought)
113
114
 
114
115
  # Return the full log of thoughts and the new thought
115
- thoughts = "\n".join([f"- {t}" for t in session_state["workflow_thoughts"]])
116
+ thoughts = "\n".join([f"- {t}" for t in run_context.session_state["workflow_thoughts"]])
116
117
  formatted_thoughts = dedent(
117
118
  f"""Workflow Thoughts:
118
119
  {thoughts}
@@ -125,7 +126,7 @@ class WorkflowTools(Toolkit):
125
126
 
126
127
  def run_workflow(
127
128
  self,
128
- session_state: Dict[str, Any],
129
+ run_context: RunContext,
129
130
  input: RunWorkflowInput,
130
131
  ) -> str:
131
132
  """Use this tool to execute the workflow with the specified inputs and parameters.
@@ -140,22 +141,22 @@ class WorkflowTools(Toolkit):
140
141
  try:
141
142
  log_debug(f"Running workflow with input: {input.input_data}")
142
143
 
143
- user_id = session_state.get("current_user_id")
144
- session_id = session_state.get("current_session_id")
144
+ if run_context.session_state is None:
145
+ run_context.session_state = {}
145
146
 
146
147
  # Execute the workflow
147
148
  result: WorkflowRunOutput = self.workflow.run(
148
149
  input=input.input_data,
149
- user_id=user_id,
150
- session_id=session_id,
151
- session_state=session_state,
150
+ user_id=run_context.user_id,
151
+ session_id=run_context.session_id,
152
+ session_state=run_context.session_state,
152
153
  additional_data=input.additional_data,
153
154
  )
154
155
 
155
- if "workflow_results" not in session_state:
156
- session_state["workflow_results"] = []
156
+ if "workflow_results" not in run_context.session_state:
157
+ run_context.session_state["workflow_results"] = []
157
158
 
158
- session_state["workflow_results"].append(result.to_dict())
159
+ run_context.session_state["workflow_results"].append(result.to_dict())
159
160
 
160
161
  return json.dumps(result.to_dict(), indent=2)
161
162
 
@@ -165,7 +166,7 @@ class WorkflowTools(Toolkit):
165
166
 
166
167
  async def async_run_workflow(
167
168
  self,
168
- session_state: Dict[str, Any],
169
+ run_context: RunContext,
169
170
  input: RunWorkflowInput,
170
171
  ) -> str:
171
172
  """Use this tool to execute the workflow with the specified inputs and parameters.
@@ -180,22 +181,22 @@ class WorkflowTools(Toolkit):
180
181
  try:
181
182
  log_debug(f"Running workflow with input: {input.input_data}")
182
183
 
183
- user_id = session_state.get("current_user_id")
184
- session_id = session_state.get("current_session_id")
184
+ if run_context.session_state is None:
185
+ run_context.session_state = {}
185
186
 
186
187
  # Execute the workflow
187
188
  result: WorkflowRunOutput = await self.workflow.arun(
188
189
  input=input.input_data,
189
- user_id=user_id,
190
- session_id=session_id,
191
- session_state=session_state,
190
+ user_id=run_context.user_id,
191
+ session_id=run_context.session_id,
192
+ session_state=run_context.session_state,
192
193
  additional_data=input.additional_data,
193
194
  )
194
195
 
195
- if "workflow_results" not in session_state:
196
- session_state["workflow_results"] = []
196
+ if "workflow_results" not in run_context.session_state:
197
+ run_context.session_state["workflow_results"] = []
197
198
 
198
- session_state["workflow_results"].append(result.to_dict())
199
+ run_context.session_state["workflow_results"].append(result.to_dict())
199
200
 
200
201
  return json.dumps(result.to_dict(), indent=2)
201
202
 
@@ -203,7 +204,7 @@ class WorkflowTools(Toolkit):
203
204
  log_error(f"Error running workflow: {e}")
204
205
  return f"Error running workflow: {e}"
205
206
 
206
- def analyze(self, session_state: Dict[str, Any], analysis: str) -> str:
207
+ def analyze(self, run_context: RunContext, analysis: str) -> str:
207
208
  """Use this tool to evaluate whether the workflow execution results are correct and sufficient.
208
209
  If not, go back to "Think" or "Run" with refined inputs or parameters.
209
210
  Args:
@@ -213,14 +214,14 @@ class WorkflowTools(Toolkit):
213
214
  log_debug(f"Workflow Analysis: {analysis}")
214
215
 
215
216
  # Add the analysis to the session state
216
- if session_state is None:
217
- session_state = {}
218
- if "workflow_analysis" not in session_state:
219
- session_state["workflow_analysis"] = []
220
- session_state["workflow_analysis"].append(analysis)
217
+ if run_context.session_state is None:
218
+ run_context.session_state = {}
219
+ if "workflow_analysis" not in run_context.session_state:
220
+ run_context.session_state["workflow_analysis"] = []
221
+ run_context.session_state["workflow_analysis"].append(analysis)
221
222
 
222
223
  # Return the full log of analysis and the new analysis
223
- analysis_log = "\n".join([f"- {a}" for a in session_state["workflow_analysis"]])
224
+ analysis_log = "\n".join([f"- {a}" for a in run_context.session_state["workflow_analysis"]])
224
225
  formatted_analysis = dedent(
225
226
  f"""Workflow Analysis:
226
227
  {analysis_log}
@@ -231,7 +232,7 @@ class WorkflowTools(Toolkit):
231
232
  log_error(f"Error recording workflow analysis: {e}")
232
233
  return f"Error recording workflow analysis: {e}"
233
234
 
234
- async def async_analyze(self, session_state: Dict[str, Any], analysis: str) -> str:
235
+ async def async_analyze(self, run_context: RunContext, analysis: str) -> str:
235
236
  """Use this tool to evaluate whether the workflow execution results are correct and sufficient.
236
237
  If not, go back to "Think" or "Run" with refined inputs or parameters.
237
238
  Args:
@@ -241,14 +242,14 @@ class WorkflowTools(Toolkit):
241
242
  log_debug(f"Workflow Analysis: {analysis}")
242
243
 
243
244
  # Add the analysis to the session state
244
- if session_state is None:
245
- session_state = {}
246
- if "workflow_analysis" not in session_state:
247
- session_state["workflow_analysis"] = []
248
- session_state["workflow_analysis"].append(analysis)
245
+ if run_context.session_state is None:
246
+ run_context.session_state = {}
247
+ if "workflow_analysis" not in run_context.session_state:
248
+ run_context.session_state["workflow_analysis"] = []
249
+ run_context.session_state["workflow_analysis"].append(analysis)
249
250
 
250
251
  # Return the full log of analysis and the new analysis
251
- analysis_log = "\n".join([f"- {a}" for a in session_state["workflow_analysis"]])
252
+ analysis_log = "\n".join([f"- {a}" for a in run_context.session_state["workflow_analysis"]])
252
253
  formatted_analysis = dedent(
253
254
  f"""Workflow Analysis:
254
255
  {analysis_log}
@@ -262,7 +263,7 @@ class WorkflowTools(Toolkit):
262
263
  DEFAULT_INSTRUCTIONS = dedent("""\
263
264
  You have access to the Think, Run Workflow, and Analyze tools that will help you execute workflows and analyze their results. Use these tools as frequently as needed to successfully complete workflow-based tasks.
264
265
  ## How to use the Think, Run Workflow, and Analyze tools:
265
-
266
+
266
267
  1. **Think**
267
268
  - Purpose: A scratchpad for planning workflow execution, brainstorming inputs, and refining your approach. You never reveal your "Think" content to the user.
268
269
  - Usage: Call `think` whenever you need to figure out what workflow inputs to use, analyze requirements, or decide on execution strategy before (or after) you run the workflow.
agno/utils/agent.py CHANGED
@@ -1,5 +1,19 @@
1
+ import asyncio
1
2
  from asyncio import Future, Task
2
- from typing import TYPE_CHECKING, Any, AsyncIterator, Callable, Dict, Iterator, List, Optional, Sequence, Type, Union
3
+ from typing import (
4
+ TYPE_CHECKING,
5
+ Any,
6
+ AsyncIterator,
7
+ Awaitable,
8
+ Callable,
9
+ Dict,
10
+ Iterator,
11
+ List,
12
+ Optional,
13
+ Sequence,
14
+ Type,
15
+ Union,
16
+ )
3
17
 
4
18
  from pydantic import BaseModel
5
19
 
@@ -84,6 +98,7 @@ async def await_for_thread_tasks_stream(
84
98
  stream_events: bool = False,
85
99
  events_to_skip: Optional[List[RunEvent]] = None,
86
100
  store_events: bool = False,
101
+ get_memories_callback: Optional[Callable[[], Union[Optional[List[Any]], Awaitable[Optional[List[Any]]]]]] = None,
87
102
  ) -> AsyncIterator[RunOutputEvent]:
88
103
  if memory_task is not None:
89
104
  if stream_events:
@@ -106,16 +121,29 @@ async def await_for_thread_tasks_stream(
106
121
  except Exception as e:
107
122
  log_warning(f"Error in memory creation: {str(e)}")
108
123
  if stream_events:
124
+ # Get memories after update if callback provided
125
+ memories = None
126
+ if get_memories_callback is not None:
127
+ try:
128
+ result = get_memories_callback()
129
+ # Handle both sync and async callbacks
130
+ if asyncio.iscoroutine(result):
131
+ memories = await result
132
+ else:
133
+ memories = result
134
+ except Exception as e:
135
+ log_warning(f"Error getting memories: {str(e)}")
136
+
109
137
  if isinstance(run_response, TeamRunOutput):
110
138
  yield handle_event( # type: ignore
111
- create_team_memory_update_completed_event(from_run_response=run_response),
139
+ create_team_memory_update_completed_event(from_run_response=run_response, memories=memories),
112
140
  run_response,
113
141
  events_to_skip=events_to_skip, # type: ignore
114
142
  store_events=store_events,
115
143
  )
116
144
  else:
117
145
  yield handle_event( # type: ignore
118
- create_memory_update_completed_event(from_run_response=run_response),
146
+ create_memory_update_completed_event(from_run_response=run_response, memories=memories),
119
147
  run_response,
120
148
  events_to_skip=events_to_skip, # type: ignore
121
149
  store_events=store_events,
@@ -142,6 +170,7 @@ def wait_for_thread_tasks_stream(
142
170
  stream_events: bool = False,
143
171
  events_to_skip: Optional[List[RunEvent]] = None,
144
172
  store_events: bool = False,
173
+ get_memories_callback: Optional[Callable[[], Optional[List[Any]]]] = None,
145
174
  ) -> Iterator[Union[RunOutputEvent, TeamRunOutputEvent]]:
146
175
  if memory_future is not None:
147
176
  if stream_events:
@@ -164,16 +193,24 @@ def wait_for_thread_tasks_stream(
164
193
  except Exception as e:
165
194
  log_warning(f"Error in memory creation: {str(e)}")
166
195
  if stream_events:
196
+ # Get memories after update if callback provided
197
+ memories = None
198
+ if get_memories_callback is not None:
199
+ try:
200
+ memories = get_memories_callback()
201
+ except Exception as e:
202
+ log_warning(f"Error getting memories: {str(e)}")
203
+
167
204
  if isinstance(run_response, TeamRunOutput):
168
205
  yield handle_event( # type: ignore
169
- create_team_memory_update_completed_event(from_run_response=run_response),
206
+ create_team_memory_update_completed_event(from_run_response=run_response, memories=memories),
170
207
  run_response,
171
208
  events_to_skip=events_to_skip, # type: ignore
172
209
  store_events=store_events,
173
210
  )
174
211
  else:
175
212
  yield handle_event( # type: ignore
176
- create_memory_update_completed_event(from_run_response=run_response),
213
+ create_memory_update_completed_event(from_run_response=run_response, memories=memories),
177
214
  run_response,
178
215
  events_to_skip=events_to_skip, # type: ignore
179
216
  store_events=store_events,
agno/utils/events.py CHANGED
@@ -5,8 +5,12 @@ from agno.models.message import Citations
5
5
  from agno.models.response import ToolExecution
6
6
  from agno.reasoning.step import ReasoningStep
7
7
  from agno.run.agent import (
8
+ CompressionCompletedEvent,
9
+ CompressionStartedEvent,
8
10
  MemoryUpdateCompletedEvent,
9
11
  MemoryUpdateStartedEvent,
12
+ ModelRequestCompletedEvent,
13
+ ModelRequestStartedEvent,
10
14
  OutputModelResponseCompletedEvent,
11
15
  OutputModelResponseStartedEvent,
12
16
  ParserModelResponseCompletedEvent,
@@ -38,8 +42,12 @@ from agno.run.agent import (
38
42
  ToolCallStartedEvent,
39
43
  )
40
44
  from agno.run.requirement import RunRequirement
45
+ from agno.run.team import CompressionCompletedEvent as TeamCompressionCompletedEvent
46
+ from agno.run.team import CompressionStartedEvent as TeamCompressionStartedEvent
41
47
  from agno.run.team import MemoryUpdateCompletedEvent as TeamMemoryUpdateCompletedEvent
42
48
  from agno.run.team import MemoryUpdateStartedEvent as TeamMemoryUpdateStartedEvent
49
+ from agno.run.team import ModelRequestCompletedEvent as TeamModelRequestCompletedEvent
50
+ from agno.run.team import ModelRequestStartedEvent as TeamModelRequestStartedEvent
43
51
  from agno.run.team import OutputModelResponseCompletedEvent as TeamOutputModelResponseCompletedEvent
44
52
  from agno.run.team import OutputModelResponseStartedEvent as TeamOutputModelResponseStartedEvent
45
53
  from agno.run.team import ParserModelResponseCompletedEvent as TeamParserModelResponseCompletedEvent
@@ -349,21 +357,27 @@ def create_team_memory_update_started_event(from_run_response: TeamRunOutput) ->
349
357
  )
350
358
 
351
359
 
352
- def create_memory_update_completed_event(from_run_response: RunOutput) -> MemoryUpdateCompletedEvent:
360
+ def create_memory_update_completed_event(
361
+ from_run_response: RunOutput, memories: Optional[List[Any]] = None
362
+ ) -> MemoryUpdateCompletedEvent:
353
363
  return MemoryUpdateCompletedEvent(
354
364
  session_id=from_run_response.session_id,
355
365
  agent_id=from_run_response.agent_id, # type: ignore
356
366
  agent_name=from_run_response.agent_name, # type: ignore
357
367
  run_id=from_run_response.run_id,
368
+ memories=memories,
358
369
  )
359
370
 
360
371
 
361
- def create_team_memory_update_completed_event(from_run_response: TeamRunOutput) -> TeamMemoryUpdateCompletedEvent:
372
+ def create_team_memory_update_completed_event(
373
+ from_run_response: TeamRunOutput, memories: Optional[List[Any]] = None
374
+ ) -> TeamMemoryUpdateCompletedEvent:
362
375
  return TeamMemoryUpdateCompletedEvent(
363
376
  session_id=from_run_response.session_id,
364
377
  team_id=from_run_response.team_id, # type: ignore
365
378
  team_name=from_run_response.team_name, # type: ignore
366
379
  run_id=from_run_response.run_id,
380
+ memories=memories,
367
381
  )
368
382
 
369
383
 
@@ -759,6 +773,150 @@ def create_team_output_model_response_completed_event(
759
773
  )
760
774
 
761
775
 
776
+ def create_model_request_started_event(
777
+ from_run_response: RunOutput,
778
+ model: Optional[str] = None,
779
+ model_provider: Optional[str] = None,
780
+ ) -> ModelRequestStartedEvent:
781
+ return ModelRequestStartedEvent(
782
+ session_id=from_run_response.session_id,
783
+ agent_id=from_run_response.agent_id, # type: ignore
784
+ agent_name=from_run_response.agent_name, # type: ignore
785
+ run_id=from_run_response.run_id,
786
+ model=model,
787
+ model_provider=model_provider,
788
+ )
789
+
790
+
791
+ def create_model_request_completed_event(
792
+ from_run_response: RunOutput,
793
+ model: Optional[str] = None,
794
+ model_provider: Optional[str] = None,
795
+ input_tokens: Optional[int] = None,
796
+ output_tokens: Optional[int] = None,
797
+ total_tokens: Optional[int] = None,
798
+ time_to_first_token: Optional[float] = None,
799
+ reasoning_tokens: Optional[int] = None,
800
+ cache_read_tokens: Optional[int] = None,
801
+ cache_write_tokens: Optional[int] = None,
802
+ ) -> ModelRequestCompletedEvent:
803
+ return ModelRequestCompletedEvent(
804
+ session_id=from_run_response.session_id,
805
+ agent_id=from_run_response.agent_id, # type: ignore
806
+ agent_name=from_run_response.agent_name, # type: ignore
807
+ run_id=from_run_response.run_id,
808
+ model=model,
809
+ model_provider=model_provider,
810
+ input_tokens=input_tokens,
811
+ output_tokens=output_tokens,
812
+ total_tokens=total_tokens,
813
+ time_to_first_token=time_to_first_token,
814
+ reasoning_tokens=reasoning_tokens,
815
+ cache_read_tokens=cache_read_tokens,
816
+ cache_write_tokens=cache_write_tokens,
817
+ )
818
+
819
+
820
+ def create_team_model_request_started_event(
821
+ from_run_response: TeamRunOutput,
822
+ model: Optional[str] = None,
823
+ model_provider: Optional[str] = None,
824
+ ) -> TeamModelRequestStartedEvent:
825
+ return TeamModelRequestStartedEvent(
826
+ session_id=from_run_response.session_id,
827
+ team_id=from_run_response.team_id, # type: ignore
828
+ team_name=from_run_response.team_name, # type: ignore
829
+ run_id=from_run_response.run_id,
830
+ model=model,
831
+ model_provider=model_provider,
832
+ )
833
+
834
+
835
+ def create_team_model_request_completed_event(
836
+ from_run_response: TeamRunOutput,
837
+ model: Optional[str] = None,
838
+ model_provider: Optional[str] = None,
839
+ input_tokens: Optional[int] = None,
840
+ output_tokens: Optional[int] = None,
841
+ total_tokens: Optional[int] = None,
842
+ time_to_first_token: Optional[float] = None,
843
+ reasoning_tokens: Optional[int] = None,
844
+ cache_read_tokens: Optional[int] = None,
845
+ cache_write_tokens: Optional[int] = None,
846
+ ) -> TeamModelRequestCompletedEvent:
847
+ return TeamModelRequestCompletedEvent(
848
+ session_id=from_run_response.session_id,
849
+ team_id=from_run_response.team_id, # type: ignore
850
+ team_name=from_run_response.team_name, # type: ignore
851
+ run_id=from_run_response.run_id,
852
+ model=model,
853
+ model_provider=model_provider,
854
+ input_tokens=input_tokens,
855
+ output_tokens=output_tokens,
856
+ total_tokens=total_tokens,
857
+ time_to_first_token=time_to_first_token,
858
+ reasoning_tokens=reasoning_tokens,
859
+ cache_read_tokens=cache_read_tokens,
860
+ cache_write_tokens=cache_write_tokens,
861
+ )
862
+
863
+
864
+ def create_compression_started_event(
865
+ from_run_response: RunOutput,
866
+ ) -> CompressionStartedEvent:
867
+ return CompressionStartedEvent(
868
+ session_id=from_run_response.session_id,
869
+ agent_id=from_run_response.agent_id, # type: ignore
870
+ agent_name=from_run_response.agent_name, # type: ignore
871
+ run_id=from_run_response.run_id,
872
+ )
873
+
874
+
875
+ def create_compression_completed_event(
876
+ from_run_response: RunOutput,
877
+ tool_results_compressed: Optional[int] = None,
878
+ original_size: Optional[int] = None,
879
+ compressed_size: Optional[int] = None,
880
+ ) -> CompressionCompletedEvent:
881
+ return CompressionCompletedEvent(
882
+ session_id=from_run_response.session_id,
883
+ agent_id=from_run_response.agent_id, # type: ignore
884
+ agent_name=from_run_response.agent_name, # type: ignore
885
+ run_id=from_run_response.run_id,
886
+ tool_results_compressed=tool_results_compressed,
887
+ original_size=original_size,
888
+ compressed_size=compressed_size,
889
+ )
890
+
891
+
892
+ def create_team_compression_started_event(
893
+ from_run_response: TeamRunOutput,
894
+ ) -> TeamCompressionStartedEvent:
895
+ return TeamCompressionStartedEvent(
896
+ session_id=from_run_response.session_id,
897
+ team_id=from_run_response.team_id, # type: ignore
898
+ team_name=from_run_response.team_name, # type: ignore
899
+ run_id=from_run_response.run_id,
900
+ )
901
+
902
+
903
+ def create_team_compression_completed_event(
904
+ from_run_response: TeamRunOutput,
905
+ tool_results_compressed: Optional[int] = None,
906
+ original_size: Optional[int] = None,
907
+ compressed_size: Optional[int] = None,
908
+ ) -> TeamCompressionCompletedEvent:
909
+ return TeamCompressionCompletedEvent(
910
+ session_id=from_run_response.session_id,
911
+ team_id=from_run_response.team_id, # type: ignore
912
+ team_name=from_run_response.team_name, # type: ignore
913
+ run_id=from_run_response.run_id,
914
+ tool_results_compressed=tool_results_compressed,
915
+ original_size=original_size,
916
+ compressed_size=compressed_size,
917
+ )
918
+
919
+
762
920
  def handle_event(
763
921
  event: Union[RunOutputEvent, TeamRunOutputEvent],
764
922
  run_response: Union[RunOutput, TeamRunOutput],
@@ -1,5 +1,4 @@
1
1
  import json
2
- import warnings
3
2
  from collections.abc import Set
4
3
  from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union, cast, get_args
5
4
 
@@ -555,8 +554,6 @@ def print_response(
555
554
  videos: Optional[Sequence[Video]] = None,
556
555
  files: Optional[Sequence[File]] = None,
557
556
  knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
558
- stream_events: Optional[bool] = None,
559
- stream_intermediate_steps: Optional[bool] = None,
560
557
  debug_mode: Optional[bool] = None,
561
558
  markdown: bool = False,
562
559
  show_message: bool = True,
@@ -571,19 +568,6 @@ def print_response(
571
568
  metadata: Optional[Dict[str, Any]] = None,
572
569
  **kwargs: Any,
573
570
  ):
574
- if stream_events is not None:
575
- warnings.warn(
576
- "The 'stream_events' parameter is deprecated and will be removed in future versions. Event streaming is always enabled using the print_response function.",
577
- DeprecationWarning,
578
- stacklevel=2,
579
- )
580
- if stream_intermediate_steps is not None:
581
- warnings.warn(
582
- "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Event streaming is always enabled using the print_response function.",
583
- DeprecationWarning,
584
- stacklevel=2,
585
- )
586
-
587
571
  with Live(console=console) as live_log:
588
572
  status = Status("Thinking...", spinner="aesthetic", speed=0.4, refresh_per_second=10)
589
573
  live_log.update(status)
@@ -695,8 +679,6 @@ async def aprint_response(
695
679
  show_message: bool = True,
696
680
  show_reasoning: bool = True,
697
681
  show_full_reasoning: bool = False,
698
- stream_events: Optional[bool] = None,
699
- stream_intermediate_steps: Optional[bool] = None,
700
682
  tags_to_include_in_markdown: Set[str] = {"think", "thinking"},
701
683
  console: Optional[Any] = None,
702
684
  add_history_to_context: Optional[bool] = None,
@@ -706,19 +688,6 @@ async def aprint_response(
706
688
  metadata: Optional[Dict[str, Any]] = None,
707
689
  **kwargs: Any,
708
690
  ):
709
- if stream_events is not None:
710
- warnings.warn(
711
- "The 'stream_events' parameter is deprecated and will be removed in future versions. Event streaming is always enabled using the aprint_response function.",
712
- DeprecationWarning,
713
- stacklevel=2,
714
- )
715
- if stream_intermediate_steps is not None:
716
- warnings.warn(
717
- "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Event streaming is always enabled using the aprint_response function.",
718
- DeprecationWarning,
719
- stacklevel=2,
720
- )
721
-
722
691
  with Live(console=console) as live_log:
723
692
  status = Status("Thinking...", spinner="aesthetic", speed=0.4, refresh_per_second=10)
724
693
  live_log.update(status)
@@ -373,7 +373,6 @@ def print_response_stream(
373
373
  files: Optional[Sequence[File]] = None,
374
374
  markdown: bool = False,
375
375
  stream_events: bool = False,
376
- stream_intermediate_steps: bool = False, # type: ignore
377
376
  knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
378
377
  add_history_to_context: Optional[bool] = None,
379
378
  dependencies: Optional[Dict[str, Any]] = None,
@@ -1303,7 +1302,6 @@ async def aprint_response_stream(
1303
1302
  files: Optional[Sequence[File]] = None,
1304
1303
  markdown: bool = False,
1305
1304
  stream_events: bool = False,
1306
- stream_intermediate_steps: bool = False, # type: ignore
1307
1305
  knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
1308
1306
  add_history_to_context: Optional[bool] = None,
1309
1307
  dependencies: Optional[Dict[str, Any]] = None,
@@ -203,7 +203,6 @@ def print_response_stream(
203
203
  videos: Optional[List[Video]] = None,
204
204
  files: Optional[List[File]] = None,
205
205
  stream_events: bool = False,
206
- stream_intermediate_steps: bool = False,
207
206
  markdown: bool = True,
208
207
  show_time: bool = True,
209
208
  show_step_details: bool = True,
@@ -1042,7 +1041,6 @@ async def aprint_response_stream(
1042
1041
  videos: Optional[List[Video]] = None,
1043
1042
  files: Optional[List[File]] = None,
1044
1043
  stream_events: bool = False,
1045
- stream_intermediate_steps: bool = False,
1046
1044
  markdown: bool = True,
1047
1045
  show_time: bool = True,
1048
1046
  show_step_details: bool = True,