agno 2.1.10__py3-none-any.whl → 2.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
agno/utils/agent.py ADDED
@@ -0,0 +1,372 @@
1
+ from asyncio import Future, Task
2
+ from typing import AsyncIterator, Iterator, List, Optional, Sequence, Union
3
+
4
+ from agno.media import Audio, File, Image, Video
5
+ from agno.models.message import Message
6
+ from agno.run.agent import RunEvent, RunInput, RunOutput, RunOutputEvent
7
+ from agno.run.team import RunOutputEvent as TeamRunOutputEvent
8
+ from agno.run.team import TeamRunOutput
9
+ from agno.session import AgentSession, TeamSession
10
+ from agno.utils.events import (
11
+ create_memory_update_completed_event,
12
+ create_memory_update_started_event,
13
+ create_team_memory_update_completed_event,
14
+ create_team_memory_update_started_event,
15
+ handle_event,
16
+ )
17
+ from agno.utils.log import log_debug, log_warning
18
+
19
+
20
async def await_for_background_tasks(
    memory_task: Optional[Task] = None,
    cultural_knowledge_task: Optional[Task] = None,
) -> None:
    """Await the optional background memory / cultural-knowledge tasks.

    Failures are logged as warnings and never propagated to the caller, so a
    broken background task cannot fail the main run.
    """
    for task, label in (
        (memory_task, "memory"),
        (cultural_knowledge_task, "cultural knowledge"),
    ):
        if task is None:
            continue
        try:
            await task
        except Exception as e:
            log_warning(f"Error in {label} creation: {str(e)}")
35
+
36
+
37
def wait_for_background_tasks(
    memory_future: Optional[Future] = None, cultural_knowledge_future: Optional[Future] = None
) -> None:
    """Block until the optional background futures finish.

    Synchronous counterpart of await_for_background_tasks: failures are
    logged as warnings, never raised.
    """
    for future, label in (
        (memory_future, "memory"),
        (cultural_knowledge_future, "cultural knowledge"),
    ):
        if future is None:
            continue
        try:
            future.result()
        except Exception as e:
            log_warning(f"Error in {label} creation: {str(e)}")
52
+
53
+
54
async def await_for_background_tasks_stream(
    run_response: Union[RunOutput, TeamRunOutput],
    memory_task: Optional[Task] = None,
    cultural_knowledge_task: Optional[Task] = None,
    stream_events: bool = False,
    events_to_skip: Optional[List[RunEvent]] = None,
    store_events: bool = False,
) -> AsyncIterator[RunOutputEvent]:
    """Await background tasks, optionally yielding memory-update events.

    When stream_events is True, a memory-update started/completed event pair
    (team or agent flavour, depending on run_response) is emitted around the
    memory task. Task failures are logged, never raised.
    """
    if memory_task is not None:
        if stream_events:
            # Pick the team or agent flavour of the started event.
            if isinstance(run_response, TeamRunOutput):
                started = create_team_memory_update_started_event(from_run_response=run_response)
            else:
                started = create_memory_update_started_event(from_run_response=run_response)
            yield handle_event(  # type: ignore
                started,
                run_response,
                events_to_skip=events_to_skip,  # type: ignore
                store_events=store_events,
            )
        try:
            await memory_task
        except Exception as e:
            log_warning(f"Error in memory creation: {str(e)}")
        if stream_events:
            if isinstance(run_response, TeamRunOutput):
                completed = create_team_memory_update_completed_event(from_run_response=run_response)
            else:
                completed = create_memory_update_completed_event(from_run_response=run_response)
            yield handle_event(  # type: ignore
                completed,
                run_response,
                events_to_skip=events_to_skip,  # type: ignore
                store_events=store_events,
            )

    # Cultural knowledge creation has no started/completed events yet.
    if cultural_knowledge_task is not None:
        try:
            await cultural_knowledge_task
        except Exception as e:
            log_warning(f"Error in cultural knowledge creation: {str(e)}")
103
+
104
+
105
def wait_for_background_tasks_stream(
    run_response: Union[TeamRunOutput, RunOutput],
    memory_future: Optional[Future] = None,
    cultural_knowledge_future: Optional[Future] = None,
    stream_events: bool = False,
    events_to_skip: Optional[List[RunEvent]] = None,
    store_events: bool = False,
) -> Iterator[Union[RunOutputEvent, TeamRunOutputEvent]]:
    """Block on background futures, optionally yielding memory-update events.

    Synchronous mirror of await_for_background_tasks_stream: emits a
    started/completed event pair around the memory future when stream_events
    is True. Failures are logged as warnings, never raised.
    """
    if memory_future is not None:
        if stream_events:
            # Team and agent runs use different event classes.
            if isinstance(run_response, TeamRunOutput):
                started = create_team_memory_update_started_event(from_run_response=run_response)
            else:
                started = create_memory_update_started_event(from_run_response=run_response)
            yield handle_event(  # type: ignore
                started,
                run_response,
                events_to_skip=events_to_skip,  # type: ignore
                store_events=store_events,
            )
        try:
            memory_future.result()
        except Exception as e:
            log_warning(f"Error in memory creation: {str(e)}")
        if stream_events:
            if isinstance(run_response, TeamRunOutput):
                completed = create_team_memory_update_completed_event(from_run_response=run_response)
            else:
                completed = create_memory_update_completed_event(from_run_response=run_response)
            yield handle_event(  # type: ignore
                completed,
                run_response,
                events_to_skip=events_to_skip,  # type: ignore
                store_events=store_events,
            )

    # Wait for cultural knowledge creation
    if cultural_knowledge_future is not None:
        # TODO: Add events
        try:
            cultural_knowledge_future.result()
        except Exception as e:
            log_warning(f"Error in cultural knowledge creation: {str(e)}")
156
+
157
+
158
def collect_joint_images(
    run_input: Optional[RunInput] = None,
    session: Optional[Union[AgentSession, TeamSession]] = None,
) -> Optional[Sequence[Image]]:
    """Gather every image visible to the model: current input plus session history."""
    collected: List[Image] = []

    # Images attached to the current run input come first.
    if run_input and run_input.images:
        collected.extend(run_input.images)
        log_debug(f"Added {len(run_input.images)} input images to joint list")

    # Then images from prior runs, both model-generated and user-provided.
    try:
        for historical_run in (session.runs if session and session.runs else []):
            if historical_run.images:
                collected.extend(historical_run.images)
                log_debug(
                    f"Added {len(historical_run.images)} generated images from historical run {historical_run.run_id}"
                )
            if historical_run.input and historical_run.input.images:
                collected.extend(historical_run.input.images)
                log_debug(
                    f"Added {len(historical_run.input.images)} input images from historical run {historical_run.run_id}"
                )
    except Exception as e:
        log_debug(f"Could not access session history for images: {e}")

    if not collected:
        return None
    log_debug(f"Images Available to Model: {len(collected)} images")
    return collected
193
+
194
+
195
def collect_joint_videos(
    run_input: Optional[RunInput] = None,
    session: Optional[Union[AgentSession, TeamSession]] = None,
) -> Optional[Sequence[Video]]:
    """Gather every video visible to the model: current input plus session history."""
    collected: List[Video] = []

    # Videos attached to the current run input come first.
    if run_input and run_input.videos:
        collected.extend(run_input.videos)
        log_debug(f"Added {len(run_input.videos)} input videos to joint list")

    # Then videos from prior runs, both model-generated and user-provided.
    try:
        for historical_run in (session.runs if session and session.runs else []):
            if historical_run.videos:
                collected.extend(historical_run.videos)
                log_debug(
                    f"Added {len(historical_run.videos)} generated videos from historical run {historical_run.run_id}"
                )
            if historical_run.input and historical_run.input.videos:
                collected.extend(historical_run.input.videos)
                log_debug(
                    f"Added {len(historical_run.input.videos)} input videos from historical run {historical_run.run_id}"
                )
    except Exception as e:
        log_debug(f"Could not access session history for videos: {e}")

    if not collected:
        return None
    log_debug(f"Videos Available to Model: {len(collected)} videos")
    return collected
230
+
231
+
232
def collect_joint_audios(
    run_input: Optional[RunInput] = None,
    session: Optional[Union[AgentSession, TeamSession]] = None,
) -> Optional[Sequence[Audio]]:
    """Gather every audio clip visible to the model: current input plus session history."""
    collected: List[Audio] = []

    # Audio attached to the current run input comes first.
    if run_input and run_input.audios:
        collected.extend(run_input.audios)
        log_debug(f"Added {len(run_input.audios)} input audios to joint list")

    # Then audio from prior runs. NOTE: generated audio lives on
    # historical_run.audio (singular) while input audio is .audios.
    try:
        for historical_run in (session.runs if session and session.runs else []):
            if historical_run.audio:
                collected.extend(historical_run.audio)
                log_debug(
                    f"Added {len(historical_run.audio)} generated audios from historical run {historical_run.run_id}"
                )
            if historical_run.input and historical_run.input.audios:
                collected.extend(historical_run.input.audios)
                log_debug(
                    f"Added {len(historical_run.input.audios)} input audios from historical run {historical_run.run_id}"
                )
    except Exception as e:
        log_debug(f"Could not access session history for audios: {e}")

    if not collected:
        return None
    log_debug(f"Audios Available to Model: {len(collected)} audios")
    return collected
267
+
268
+
269
def collect_joint_files(
    run_input: Optional[RunInput] = None,
) -> Optional[Sequence[File]]:
    """Collect files from the current run input.

    Returns None when no files are present, mirroring the other
    collect_joint_* helpers. Fix: dropped the redundant function-local
    `from agno.utils.log import log_debug`, which shadowed the module-level
    import for no reason.
    """
    joint_files: List[File] = []

    # 1. Add files from current input
    if run_input and run_input.files:
        joint_files.extend(run_input.files)

    # TODO: Files aren't stored in session history yet and don't have a FileArtifact

    if joint_files:
        log_debug(f"Files Available to Model: {len(joint_files)} files")

    return joint_files if joint_files else None
287
+
288
+
289
def scrub_media_from_run_output(run_response: Union[RunOutput, TeamRunOutput]) -> None:
    """Remove media from a RunOutput when store_media=False.

    Scrubs the run input's media lists and strips media from every message
    list on the output (messages, additional_input, reasoning_messages).
    NOTE(review): output media artifacts on the run itself are not touched
    here — confirm they are handled elsewhere.
    """
    # Empty out all media attached to the run input.
    if run_response.input is not None:
        run_response.input.images = []
        run_response.input.videos = []
        run_response.input.audios = []
        run_response.input.files = []

    # Strip media from every message collection on the output.
    for message_list in (
        run_response.messages,
        run_response.additional_input,
        run_response.reasoning_messages,
    ):
        if message_list:
            for message in message_list:
                scrub_media_from_message(message)
315
+
316
+
317
def scrub_media_from_message(message: Message) -> None:
    """Null out every media field on a Message, both input and generated."""
    for attr in (
        # Input media
        "images",
        "videos",
        "audio",
        "files",
        # Output media
        "audio_output",
        "image_output",
        "video_output",
    ):
        setattr(message, attr, None)
329
+
330
+
331
def scrub_tool_results_from_run_output(run_response: Union[RunOutput, TeamRunOutput]) -> None:
    """
    Remove all tool-related data from RunOutput when store_tool_messages=False.
    Drops both the tool result messages and the assistant messages that issued
    those tool calls, keeping the message list consistent for the API.
    """
    messages = run_response.messages
    if not messages:
        return

    # Ids of every tool result present; assistant messages that issued these
    # calls are removed together with their results.
    removed_ids = {m.tool_call_id for m in messages if m.role == "tool" and m.tool_call_id}

    def _issued_removed_call(msg) -> bool:
        # True when an assistant message made one of the tool calls being scrubbed.
        if msg.role != "assistant" or not msg.tool_calls:
            return False
        return any(tc.get("id") in removed_ids for tc in msg.tool_calls)

    run_response.messages = [
        m for m in messages if m.role != "tool" and not _issued_removed_call(m)
    ]
363
+
364
+
365
def scrub_history_messages_from_run_output(run_response: Union[RunOutput, TeamRunOutput]) -> None:
    """
    Remove history messages from the run output when store_history_messages=False.
    Drops every message flagged with from_history=True (i.e. loaded from memory).
    """
    if not run_response.messages:
        return
    run_response.messages = [
        message for message in run_response.messages if not message.from_history
    ]
agno/utils/events.py CHANGED
@@ -1,4 +1,4 @@
1
- from typing import Any, Dict, List, Optional
1
+ from typing import Any, Dict, List, Optional, Union
2
2
 
3
3
  from agno.media import Audio, Image
4
4
  from agno.models.message import Citations
@@ -11,6 +11,8 @@ from agno.run.agent import (
11
11
  OutputModelResponseStartedEvent,
12
12
  ParserModelResponseCompletedEvent,
13
13
  ParserModelResponseStartedEvent,
14
+ PostHookCompletedEvent,
15
+ PostHookStartedEvent,
14
16
  PreHookCompletedEvent,
15
17
  PreHookStartedEvent,
16
18
  ReasoningCompletedEvent,
@@ -18,13 +20,18 @@ from agno.run.agent import (
18
20
  ReasoningStepEvent,
19
21
  RunCancelledEvent,
20
22
  RunCompletedEvent,
23
+ RunContentCompletedEvent,
21
24
  RunContentEvent,
22
25
  RunContinuedEvent,
23
26
  RunErrorEvent,
27
+ RunEvent,
24
28
  RunInput,
25
29
  RunOutput,
30
+ RunOutputEvent,
26
31
  RunPausedEvent,
27
32
  RunStartedEvent,
33
+ SessionSummaryCompletedEvent,
34
+ SessionSummaryStartedEvent,
28
35
  ToolCallCompletedEvent,
29
36
  ToolCallStartedEvent,
30
37
  )
@@ -34,6 +41,8 @@ from agno.run.team import OutputModelResponseCompletedEvent as TeamOutputModelRe
34
41
  from agno.run.team import OutputModelResponseStartedEvent as TeamOutputModelResponseStartedEvent
35
42
  from agno.run.team import ParserModelResponseCompletedEvent as TeamParserModelResponseCompletedEvent
36
43
  from agno.run.team import ParserModelResponseStartedEvent as TeamParserModelResponseStartedEvent
44
+ from agno.run.team import PostHookCompletedEvent as TeamPostHookCompletedEvent
45
+ from agno.run.team import PostHookStartedEvent as TeamPostHookStartedEvent
37
46
  from agno.run.team import PreHookCompletedEvent as TeamPreHookCompletedEvent
38
47
  from agno.run.team import PreHookStartedEvent as TeamPreHookStartedEvent
39
48
  from agno.run.team import ReasoningCompletedEvent as TeamReasoningCompletedEvent
@@ -41,12 +50,16 @@ from agno.run.team import ReasoningStartedEvent as TeamReasoningStartedEvent
41
50
  from agno.run.team import ReasoningStepEvent as TeamReasoningStepEvent
42
51
  from agno.run.team import RunCancelledEvent as TeamRunCancelledEvent
43
52
  from agno.run.team import RunCompletedEvent as TeamRunCompletedEvent
53
+ from agno.run.team import RunContentCompletedEvent as TeamRunContentCompletedEvent
44
54
  from agno.run.team import RunContentEvent as TeamRunContentEvent
45
55
  from agno.run.team import RunErrorEvent as TeamRunErrorEvent
46
56
  from agno.run.team import RunStartedEvent as TeamRunStartedEvent
47
- from agno.run.team import TeamRunInput, TeamRunOutput
57
+ from agno.run.team import SessionSummaryCompletedEvent as TeamSessionSummaryCompletedEvent
58
+ from agno.run.team import SessionSummaryStartedEvent as TeamSessionSummaryStartedEvent
59
+ from agno.run.team import TeamRunEvent, TeamRunInput, TeamRunOutput, TeamRunOutputEvent
48
60
  from agno.run.team import ToolCallCompletedEvent as TeamToolCallCompletedEvent
49
61
  from agno.run.team import ToolCallStartedEvent as TeamToolCallStartedEvent
62
+ from agno.session.summary import SessionSummary
50
63
 
51
64
 
52
65
  def create_team_run_started_event(from_run_response: TeamRunOutput) -> TeamRunStartedEvent:
@@ -242,6 +255,54 @@ def create_team_pre_hook_completed_event(
242
255
  )
243
256
 
244
257
 
258
def create_post_hook_started_event(
    from_run_response: RunOutput, post_hook_name: Optional[str] = None
) -> PostHookStartedEvent:
    """Build a PostHookStartedEvent carrying the run's identifying fields."""
    return PostHookStartedEvent(
        session_id=from_run_response.session_id,
        agent_id=from_run_response.agent_id,  # type: ignore
        agent_name=from_run_response.agent_name,  # type: ignore
        run_id=from_run_response.run_id,
        post_hook_name=post_hook_name,
    )
268
+
269
+
270
def create_team_post_hook_started_event(
    from_run_response: TeamRunOutput, post_hook_name: Optional[str] = None
) -> TeamPostHookStartedEvent:
    """Build a TeamPostHookStartedEvent carrying the team run's identifying fields."""
    return TeamPostHookStartedEvent(
        session_id=from_run_response.session_id,
        team_id=from_run_response.team_id,  # type: ignore
        team_name=from_run_response.team_name,  # type: ignore
        run_id=from_run_response.run_id,
        post_hook_name=post_hook_name,
    )
280
+
281
+
282
def create_post_hook_completed_event(
    from_run_response: RunOutput, post_hook_name: Optional[str] = None
) -> PostHookCompletedEvent:
    """Build a PostHookCompletedEvent carrying the run's identifying fields."""
    return PostHookCompletedEvent(
        session_id=from_run_response.session_id,
        agent_id=from_run_response.agent_id,  # type: ignore
        agent_name=from_run_response.agent_name,  # type: ignore
        run_id=from_run_response.run_id,
        post_hook_name=post_hook_name,
    )
292
+
293
+
294
def create_team_post_hook_completed_event(
    from_run_response: TeamRunOutput, post_hook_name: Optional[str] = None
) -> TeamPostHookCompletedEvent:
    """Build a TeamPostHookCompletedEvent carrying the team run's identifying fields."""
    return TeamPostHookCompletedEvent(
        session_id=from_run_response.session_id,
        team_id=from_run_response.team_id,  # type: ignore
        team_name=from_run_response.team_name,  # type: ignore
        run_id=from_run_response.run_id,
        post_hook_name=post_hook_name,
    )
304
+
305
+
245
306
  def create_memory_update_started_event(from_run_response: RunOutput) -> MemoryUpdateStartedEvent:
246
307
  return MemoryUpdateStartedEvent(
247
308
  session_id=from_run_response.session_id,
@@ -278,6 +339,50 @@ def create_team_memory_update_completed_event(from_run_response: TeamRunOutput)
278
339
  )
279
340
 
280
341
 
342
def create_team_session_summary_started_event(
    from_run_response: TeamRunOutput,
) -> TeamSessionSummaryStartedEvent:
    """Build a TeamSessionSummaryStartedEvent from the team run's identifying fields."""
    return TeamSessionSummaryStartedEvent(
        session_id=from_run_response.session_id,
        team_id=from_run_response.team_id,  # type: ignore
        team_name=from_run_response.team_name,  # type: ignore
        run_id=from_run_response.run_id,
    )
351
+
352
+
353
def create_team_session_summary_completed_event(
    from_run_response: TeamRunOutput, session_summary: Optional[SessionSummary] = None
) -> TeamSessionSummaryCompletedEvent:
    """Build a TeamSessionSummaryCompletedEvent, optionally carrying the produced summary."""
    return TeamSessionSummaryCompletedEvent(
        session_id=from_run_response.session_id,
        team_id=from_run_response.team_id,  # type: ignore
        team_name=from_run_response.team_name,  # type: ignore
        run_id=from_run_response.run_id,
        session_summary=session_summary,
    )
363
+
364
+
365
def create_session_summary_started_event(from_run_response: RunOutput) -> SessionSummaryStartedEvent:
    """Build a SessionSummaryStartedEvent from the run's identifying fields."""
    return SessionSummaryStartedEvent(
        session_id=from_run_response.session_id,
        agent_id=from_run_response.agent_id,  # type: ignore
        agent_name=from_run_response.agent_name,  # type: ignore
        run_id=from_run_response.run_id,
    )
372
+
373
+
374
def create_session_summary_completed_event(
    from_run_response: RunOutput, session_summary: Optional[SessionSummary] = None
) -> SessionSummaryCompletedEvent:
    """Build a SessionSummaryCompletedEvent, optionally carrying the produced summary."""
    return SessionSummaryCompletedEvent(
        session_id=from_run_response.session_id,
        agent_id=from_run_response.agent_id,  # type: ignore
        agent_name=from_run_response.agent_name,  # type: ignore
        run_id=from_run_response.run_id,
        session_summary=session_summary,
    )
384
+
385
+
281
386
  def create_reasoning_started_event(from_run_response: RunOutput) -> ReasoningStartedEvent:
282
387
  return ReasoningStartedEvent(
283
388
  session_id=from_run_response.session_id,
@@ -468,6 +573,28 @@ def create_team_run_output_content_event(
468
573
  )
469
574
 
470
575
 
576
def create_run_content_completed_event(
    from_run_response: RunOutput,
) -> RunContentCompletedEvent:
    """Build a RunContentCompletedEvent from the run's identifying fields."""
    return RunContentCompletedEvent(
        session_id=from_run_response.session_id,
        agent_id=from_run_response.agent_id,  # type: ignore
        agent_name=from_run_response.agent_name,  # type: ignore
        run_id=from_run_response.run_id,
    )
585
+
586
+
587
def create_team_run_content_completed_event(
    from_run_response: TeamRunOutput,
) -> TeamRunContentCompletedEvent:
    """Build a TeamRunContentCompletedEvent from the team run's identifying fields."""
    return TeamRunContentCompletedEvent(
        session_id=from_run_response.session_id,
        team_id=from_run_response.team_id,  # type: ignore
        team_name=from_run_response.team_name,  # type: ignore
        run_id=from_run_response.run_id,
    )
596
+
597
+
471
598
  def create_parser_model_response_started_event(
472
599
  from_run_response: RunOutput,
473
600
  ) -> ParserModelResponseStartedEvent:
@@ -550,3 +677,18 @@ def create_team_output_model_response_completed_event(
550
677
  team_name=from_run_response.team_name, # type: ignore
551
678
  run_id=from_run_response.run_id,
552
679
  )
680
+
681
+
682
def handle_event(
    event: Union[RunOutputEvent, TeamRunOutputEvent],
    run_response: Union[RunOutput, TeamRunOutput],
    events_to_skip: Optional[List[Union[RunEvent, TeamRunEvent]]] = None,
    store_events: bool = False,
) -> Union[RunOutputEvent, TeamRunOutputEvent]:
    """Optionally record an event on the run response, then return it.

    The event is appended to run_response.events when store_events is True and
    the event's type is not listed in events_to_skip. Fixes: the old comment
    wrongly said only content events were skipped (skipping is caller-driven),
    the comprehension variable shadowed the `event` parameter, and membership
    was tested against a list instead of a set.
    """
    # Compare against the enum *values*, since event.event holds the raw value.
    skipped_values = {e.value for e in events_to_skip} if events_to_skip else set()
    if store_events and event.event not in skipped_values:
        if run_response.events is None:
            run_response.events = []
        run_response.events.append(event)  # type: ignore
    return event
@@ -33,6 +33,7 @@ def print_response_stream(
33
33
  images: Optional[Sequence[Image]] = None,
34
34
  videos: Optional[Sequence[Video]] = None,
35
35
  files: Optional[Sequence[File]] = None,
36
+ stream_events: bool = False,
36
37
  stream_intermediate_steps: bool = False,
37
38
  knowledge_filters: Optional[Dict[str, Any]] = None,
38
39
  debug_mode: Optional[bool] = None,
@@ -80,6 +81,9 @@ def print_response_stream(
80
81
 
81
82
  input_content = get_text_from_message(input)
82
83
 
84
+ # Consider both stream_events and stream_intermediate_steps (deprecated)
85
+ stream_events = stream_events or stream_intermediate_steps
86
+
83
87
  for response_event in agent.run(
84
88
  input=input,
85
89
  session_id=session_id,
@@ -90,7 +94,7 @@ def print_response_stream(
90
94
  videos=videos,
91
95
  files=files,
92
96
  stream=True,
93
- stream_intermediate_steps=stream_intermediate_steps,
97
+ stream_events=stream_events,
94
98
  knowledge_filters=knowledge_filters,
95
99
  debug_mode=debug_mode,
96
100
  add_history_to_context=add_history_to_context,
@@ -221,6 +225,7 @@ async def aprint_response_stream(
221
225
  images: Optional[Sequence[Image]] = None,
222
226
  videos: Optional[Sequence[Video]] = None,
223
227
  files: Optional[Sequence[File]] = None,
228
+ stream_events: bool = False,
224
229
  stream_intermediate_steps: bool = False,
225
230
  knowledge_filters: Optional[Dict[str, Any]] = None,
226
231
  debug_mode: Optional[bool] = None,
@@ -266,6 +271,9 @@ async def aprint_response_stream(
266
271
  if render:
267
272
  live_log.update(Group(*panels))
268
273
 
274
+ # Considering both stream_events and stream_intermediate_steps (deprecated)
275
+ stream_events = stream_events or stream_intermediate_steps
276
+
269
277
  result = agent.arun(
270
278
  input=input,
271
279
  session_id=session_id,
@@ -276,7 +284,7 @@ async def aprint_response_stream(
276
284
  videos=videos,
277
285
  files=files,
278
286
  stream=True,
279
- stream_intermediate_steps=stream_intermediate_steps,
287
+ stream_events=stream_events,
280
288
  knowledge_filters=knowledge_filters,
281
289
  debug_mode=debug_mode,
282
290
  add_history_to_context=add_history_to_context,
@@ -497,7 +505,6 @@ def print_response(
497
505
  images: Optional[Sequence[Image]] = None,
498
506
  videos: Optional[Sequence[Video]] = None,
499
507
  files: Optional[Sequence[File]] = None,
500
- stream_intermediate_steps: bool = False,
501
508
  knowledge_filters: Optional[Dict[str, Any]] = None,
502
509
  debug_mode: Optional[bool] = None,
503
510
  markdown: bool = False,
@@ -543,7 +550,6 @@ def print_response(
543
550
  videos=videos,
544
551
  files=files,
545
552
  stream=False,
546
- stream_intermediate_steps=stream_intermediate_steps,
547
553
  knowledge_filters=knowledge_filters,
548
554
  debug_mode=debug_mode,
549
555
  add_history_to_context=add_history_to_context,
@@ -615,7 +621,6 @@ async def aprint_response(
615
621
  images: Optional[Sequence[Image]] = None,
616
622
  videos: Optional[Sequence[Video]] = None,
617
623
  files: Optional[Sequence[File]] = None,
618
- stream_intermediate_steps: bool = False,
619
624
  knowledge_filters: Optional[Dict[str, Any]] = None,
620
625
  debug_mode: Optional[bool] = None,
621
626
  markdown: bool = False,
@@ -661,7 +666,6 @@ async def aprint_response(
661
666
  videos=videos,
662
667
  files=files,
663
668
  stream=False,
664
- stream_intermediate_steps=stream_intermediate_steps,
665
669
  knowledge_filters=knowledge_filters,
666
670
  debug_mode=debug_mode,
667
671
  add_history_to_context=add_history_to_context,