agno 2.1.10__py3-none-any.whl → 2.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
agno/team/team.py CHANGED
@@ -10,7 +10,6 @@ from typing import (
10
10
  Any,
11
11
  AsyncIterator,
12
12
  Callable,
13
- Coroutine,
14
13
  Dict,
15
14
  Iterator,
16
15
  List,
@@ -59,15 +58,28 @@ from agno.run.cancel import (
59
58
  )
60
59
  from agno.run.messages import RunMessages
61
60
  from agno.run.team import TeamRunEvent, TeamRunInput, TeamRunOutput, TeamRunOutputEvent
62
- from agno.session import SessionSummaryManager, TeamSession
61
+ from agno.session import SessionSummaryManager, TeamSession, WorkflowSession
63
62
  from agno.tools import Toolkit
64
63
  from agno.tools.function import Function
64
+ from agno.utils.agent import (
65
+ await_for_background_tasks,
66
+ await_for_background_tasks_stream,
67
+ collect_joint_audios,
68
+ collect_joint_files,
69
+ collect_joint_images,
70
+ collect_joint_videos,
71
+ scrub_history_messages_from_run_output,
72
+ scrub_media_from_run_output,
73
+ scrub_tool_results_from_run_output,
74
+ wait_for_background_tasks,
75
+ wait_for_background_tasks_stream,
76
+ )
65
77
  from agno.utils.common import is_typed_dict, validate_typed_dict
66
78
  from agno.utils.events import (
67
- create_team_memory_update_completed_event,
68
- create_team_memory_update_started_event,
69
79
  create_team_parser_model_response_completed_event,
70
80
  create_team_parser_model_response_started_event,
81
+ create_team_post_hook_completed_event,
82
+ create_team_post_hook_started_event,
71
83
  create_team_pre_hook_completed_event,
72
84
  create_team_pre_hook_started_event,
73
85
  create_team_reasoning_completed_event,
@@ -75,11 +87,15 @@ from agno.utils.events import (
75
87
  create_team_reasoning_step_event,
76
88
  create_team_run_cancelled_event,
77
89
  create_team_run_completed_event,
90
+ create_team_run_content_completed_event,
78
91
  create_team_run_error_event,
79
92
  create_team_run_output_content_event,
80
93
  create_team_run_started_event,
94
+ create_team_session_summary_completed_event,
95
+ create_team_session_summary_started_event,
81
96
  create_team_tool_call_completed_event,
82
97
  create_team_tool_call_started_event,
98
+ handle_event,
83
99
  )
84
100
  from agno.utils.hooks import filter_hook_args, normalize_hooks
85
101
  from agno.utils.knowledge import get_agentic_or_user_search_filters
@@ -131,15 +147,22 @@ class Team:
131
147
  model: Optional[Model] = None
132
148
 
133
149
  # --- Team settings ---
134
- # Name of the team
135
- name: Optional[str] = None
136
150
  # Team UUID (autogenerated if not set)
137
151
  id: Optional[str] = None
138
- # If this team is part of a team itself, this is the role of the team
139
- parent_team_id: Optional[str] = None
152
+ # Name of the team
153
+ name: Optional[str] = None
140
154
  # If this team is part of a team itself, this is the role of the team
141
155
  role: Optional[str] = None
142
156
 
157
+ # --- If this Team is part of a team itself ---
158
+ # If this team is part of a team itself, this is the ID of the parent team. This is set automatically.
159
+ parent_team_id: Optional[str] = None
160
+
161
+ # --- If this Team is part of a workflow ---
162
+ # Optional workflow ID. Indicates this team is part of a workflow. This is set automatically.
163
+ workflow_id: Optional[str] = None
164
+
165
+ # --- Team execution settings ---
143
166
  # If True, the team leader won't process responses from the members and instead will return them directly
144
167
  # Should not be used in combination with delegate_task_to_all_members
145
168
  respond_directly: bool = False
@@ -148,10 +171,6 @@ class Team:
148
171
  # Set to false if you want to send the run input directly to the member agents
149
172
  determine_input_for_members: bool = True
150
173
 
151
- # --- If this Team is part of a workflow ---
152
- # Optional workflow ID. Indicates this team is part of a workflow.
153
- workflow_id: Optional[str] = None
154
-
155
174
  # --- User settings ---
156
175
  # Default user ID for this team
157
176
  user_id: Optional[str] = None
@@ -170,13 +189,28 @@ class Team:
170
189
  # If True, cache the current Team session in memory for faster access
171
190
  cache_session: bool = False
172
191
 
173
- # If True, allow searching through previous sessions
192
+ # --- Team history settings ---
193
+ # add_history_to_context=true adds messages from the chat history to the messages list sent to the Model. This only applies to the team leader, not the members.
194
+ add_history_to_context: bool = False
195
+ # Number of historical runs to include in the messages
196
+ num_history_runs: int = 3
197
+
198
+ # Add this flag to control if the team should send the team history to the members. This means sending the team-level history to the members, not the agent-level history.
199
+ add_team_history_to_members: bool = False
200
+ # Number of historical runs to include in the messages sent to the members
201
+ num_team_history_runs: int = 3
202
+ # If True, send all member interactions (request/response) during the current run to members that have been delegated a task to
203
+ share_member_interactions: bool = False
204
+
205
+ # If True, adds a tool to allow searching through previous sessions
174
206
  search_session_history: Optional[bool] = False
175
207
  # Number of past sessions to include in the search
176
208
  num_history_sessions: Optional[int] = None
177
209
 
178
- # If True, resolve the session_state, dependencies, and metadata in the user and system messages
179
- resolve_in_context: bool = True
210
+ # If True, adds a tool to allow the team to read the team history (this is deprecated and will be removed in a future version)
211
+ read_team_history: bool = False
212
+ # If True, adds a tool to allow the team to read the chat history
213
+ read_chat_history: bool = False
180
214
 
181
215
  # --- System message settings ---
182
216
  # A description of the Team that is added to the start of the system message.
@@ -206,6 +240,9 @@ class Team:
206
240
  # Role for the system message
207
241
  system_message_role: str = "system"
208
242
 
243
+ # If True, resolve the session_state, dependencies, and metadata in the user and system messages
244
+ resolve_in_context: bool = True
245
+
209
246
  # --- Extra Messages ---
210
247
  # A list of extra messages added after the system message and before the user message.
211
248
  # Use these for few-shot learning or to provide additional context to the Model.
@@ -244,17 +281,12 @@ class Team:
244
281
  references_format: Literal["json", "yaml"] = "json"
245
282
 
246
283
  # --- Tools ---
247
- # If True, send all previous member interactions to members
248
- share_member_interactions: bool = False
249
284
  # If True, add a tool to get information about the team members
250
285
  get_member_information_tool: bool = False
251
286
  # Add a tool to search the knowledge base (aka Agentic RAG)
252
287
  # Only added if knowledge is provided.
253
288
  search_knowledge: bool = True
254
289
 
255
- # If True, read the team history
256
- read_team_history: bool = False
257
-
258
290
  # If False, media (images, videos, audio, files) is only available to tools and not sent to the LLM
259
291
  send_media_to_model: bool = True
260
292
  # If True, store media in run output
@@ -322,12 +354,6 @@ class Team:
322
354
  # If True, the team adds session summaries to the context
323
355
  add_session_summary_to_context: Optional[bool] = None
324
356
 
325
- # --- Team History ---
326
- # add_history_to_context=true adds messages from the chat history to the messages list sent to the Model.
327
- add_history_to_context: bool = False
328
- # Number of historical runs to include in the messages
329
- num_history_runs: int = 3
330
-
331
357
  # --- Team Storage ---
332
358
  # Metadata stored with this team
333
359
  metadata: Optional[Dict[str, Any]] = None
@@ -342,8 +368,10 @@ class Team:
342
368
  # --- Team Streaming ---
343
369
  # Stream the response from the Team
344
370
  stream: Optional[bool] = None
345
- # Stream the intermediate steps from the Team
346
- stream_intermediate_steps: bool = False
371
+ # Stream the intermediate steps from the Team
372
+ stream_events: Optional[bool] = None
373
+ # [Deprecated] Stream the intermediate steps from the Team
374
+ stream_intermediate_steps: Optional[bool] = None
347
375
  # Stream the member events from the Team
348
376
  stream_member_events: bool = True
349
377
 
@@ -393,6 +421,10 @@ class Team:
393
421
  overwrite_db_session_state: bool = False,
394
422
  resolve_in_context: bool = True,
395
423
  cache_session: bool = False,
424
+ add_history_to_context: bool = False,
425
+ num_history_runs: int = 3,
426
+ add_team_history_to_members: bool = False,
427
+ num_team_history_runs: int = 3,
396
428
  search_session_history: Optional[bool] = False,
397
429
  num_history_sessions: Optional[int] = None,
398
430
  description: Optional[str] = None,
@@ -421,6 +453,7 @@ class Team:
421
453
  get_member_information_tool: bool = False,
422
454
  search_knowledge: bool = True,
423
455
  read_team_history: bool = False,
456
+ read_chat_history: bool = False,
424
457
  store_media: bool = True,
425
458
  store_tool_messages: bool = True,
426
459
  store_history_messages: bool = True,
@@ -447,8 +480,6 @@ class Team:
447
480
  enable_session_summaries: bool = False,
448
481
  session_summary_manager: Optional[SessionSummaryManager] = None,
449
482
  add_session_summary_to_context: Optional[bool] = None,
450
- add_history_to_context: bool = False,
451
- num_history_runs: int = 3,
452
483
  metadata: Optional[Dict[str, Any]] = None,
453
484
  reasoning: bool = False,
454
485
  reasoning_model: Optional[Model] = None,
@@ -456,7 +487,8 @@ class Team:
456
487
  reasoning_min_steps: int = 1,
457
488
  reasoning_max_steps: int = 10,
458
489
  stream: Optional[bool] = None,
459
- stream_intermediate_steps: bool = False,
490
+ stream_events: Optional[bool] = None,
491
+ stream_intermediate_steps: Optional[bool] = None,
460
492
  store_events: bool = False,
461
493
  events_to_skip: Optional[List[Union[RunEvent, TeamRunEvent]]] = None,
462
494
  store_member_responses: bool = False,
@@ -490,6 +522,10 @@ class Team:
490
522
  self.resolve_in_context = resolve_in_context
491
523
  self.cache_session = cache_session
492
524
 
525
+ self.add_history_to_context = add_history_to_context
526
+ self.num_history_runs = num_history_runs
527
+ self.add_team_history_to_members = add_team_history_to_members
528
+ self.num_team_history_runs = num_team_history_runs
493
529
  self.search_session_history = search_session_history
494
530
  self.num_history_sessions = num_history_sessions
495
531
 
@@ -521,7 +557,7 @@ class Team:
521
557
  self.share_member_interactions = share_member_interactions
522
558
  self.get_member_information_tool = get_member_information_tool
523
559
  self.search_knowledge = search_knowledge
524
- self.read_team_history = read_team_history
560
+ self.read_chat_history = read_chat_history or read_team_history
525
561
 
526
562
  self.store_media = store_media
527
563
  self.store_tool_messages = store_tool_messages
@@ -555,15 +591,8 @@ class Team:
555
591
  self.enable_session_summaries = enable_session_summaries
556
592
  self.session_summary_manager = session_summary_manager
557
593
  self.add_session_summary_to_context = add_session_summary_to_context
558
- self.add_history_to_context = add_history_to_context
559
- self.num_history_runs = num_history_runs
560
594
  self.metadata = metadata
561
595
 
562
- if add_history_to_context and not db:
563
- log_warning(
564
- "add_history_to_context is True, but no database has been assigned to the agent. History will not be added to the context."
565
- )
566
-
567
596
  self.reasoning = reasoning
568
597
  self.reasoning_model = reasoning_model
569
598
  self.reasoning_agent = reasoning_agent
@@ -571,7 +600,7 @@ class Team:
571
600
  self.reasoning_max_steps = reasoning_max_steps
572
601
 
573
602
  self.stream = stream
574
- self.stream_intermediate_steps = stream_intermediate_steps
603
+ self.stream_events = stream_events or stream_intermediate_steps
575
604
  self.store_events = store_events
576
605
  self.store_member_responses = store_member_responses
577
606
 
@@ -620,6 +649,22 @@ class Team:
620
649
 
621
650
  self._hooks_normalised = False
622
651
 
652
+ # Lazy-initialized shared thread pool executor for background tasks (memory, cultural knowledge, etc.)
653
+ self._background_executor: Optional[Any] = None
654
+
655
+ @property
656
+ def background_executor(self) -> Any:
657
+ """Lazy initialization of shared thread pool executor for background tasks.
658
+
659
+ Handles both memory creation and cultural knowledge updates concurrently.
660
+ Initialized only on first use (runtime, not instantiation) and reused across runs.
661
+ """
662
+ if self._background_executor is None:
663
+ from concurrent.futures import ThreadPoolExecutor
664
+
665
+ self._background_executor = ThreadPoolExecutor(max_workers=3, thread_name_prefix="agno-bg")
666
+ return self._background_executor
667
+
623
668
  @property
624
669
  def should_parse_structured_output(self) -> bool:
625
670
  return self.output_schema is not None and self.parse_response and self.parser_model is None
@@ -759,11 +804,11 @@ class Team:
759
804
 
760
805
  def _initialize_session(
761
806
  self,
762
- run_id: str,
763
- user_id: Optional[str] = None,
764
807
  session_id: Optional[str] = None,
765
- session_state: Optional[Dict[str, Any]] = None,
766
- ) -> Tuple[str, Optional[str], Dict[str, Any]]:
808
+ user_id: Optional[str] = None,
809
+ ) -> Tuple[str, Optional[str]]:
810
+ """Initialize the session for the team."""
811
+
767
812
  if session_id is None:
768
813
  if self.session_id:
769
814
  session_id = self.session_id
@@ -775,30 +820,26 @@ class Team:
775
820
  log_debug(f"Session ID: {session_id}", center=True)
776
821
 
777
822
  # Use the default user_id when necessary
778
- if user_id is None:
823
+ if user_id is None or user_id == "":
779
824
  user_id = self.user_id
780
825
 
781
- # Determine the session_state with proper precedence
782
- if session_state is None:
783
- session_state = self.session_state or {}
784
- else:
785
- # If run session_state is provided, merge agent defaults under it
786
- # This ensures run state takes precedence over agent defaults
787
- if self.session_state:
788
- from agno.utils.merge_dict import merge_dictionaries
789
-
790
- base_state = self.session_state.copy()
791
- merge_dictionaries(base_state, session_state)
792
- session_state.clear()
793
- session_state.update(base_state)
826
+ return session_id, user_id
794
827
 
795
- if user_id is not None:
828
+ def _initialize_session_state(
829
+ self,
830
+ session_state: Dict[str, Any],
831
+ user_id: Optional[str] = None,
832
+ session_id: Optional[str] = None,
833
+ run_id: Optional[str] = None,
834
+ ) -> Dict[str, Any]:
835
+ """Initialize the session state for the team."""
836
+ if user_id:
796
837
  session_state["current_user_id"] = user_id
797
838
  if session_id is not None:
798
839
  session_state["current_session_id"] = session_id
799
- session_state["current_run_id"] = run_id
800
-
801
- return session_id, user_id, session_state # type: ignore
840
+ if run_id is not None:
841
+ session_state["current_run_id"] = run_id
842
+ return session_state
802
843
 
803
844
  def _has_async_db(self) -> bool:
804
845
  """Return True if the db the team is equipped with is an Async implementation"""
@@ -810,7 +851,7 @@ class Team:
810
851
 
811
852
  if self.delegate_task_to_all_members and self.respond_directly:
812
853
  log_warning(
813
- "delegate_task_to_all_members and respond_directly are both enabled. The task will be delegated to all members."
854
+ "`delegate_task_to_all_members` and `respond_directly` are both enabled. The task will be delegated to all members, but `respond_directly` will be disabled."
814
855
  )
815
856
  self.respond_directly = False
816
857
 
@@ -887,11 +928,13 @@ class Team:
887
928
  all_args.update(kwargs)
888
929
 
889
930
  for i, hook in enumerate(hooks):
890
- yield self._handle_event(
931
+ yield handle_event( # type: ignore
891
932
  run_response=run_response,
892
933
  event=create_team_pre_hook_started_event(
893
934
  from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
894
935
  ),
936
+ events_to_skip=self.events_to_skip,
937
+ store_events=self.store_events,
895
938
  )
896
939
  try:
897
940
  # Filter arguments to only include those that the hook accepts
@@ -899,11 +942,13 @@ class Team:
899
942
 
900
943
  hook(**filtered_args)
901
944
 
902
- yield self._handle_event(
945
+ yield handle_event( # type: ignore
903
946
  run_response=run_response,
904
947
  event=create_team_pre_hook_completed_event(
905
948
  from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
906
949
  ),
950
+ events_to_skip=self.events_to_skip,
951
+ store_events=self.store_events,
907
952
  )
908
953
 
909
954
  except (InputCheckError, OutputCheckError) as e:
@@ -949,11 +994,13 @@ class Team:
949
994
  all_args.update(kwargs)
950
995
 
951
996
  for i, hook in enumerate(hooks):
952
- yield self._handle_event(
997
+ yield handle_event( # type: ignore
953
998
  run_response=run_response,
954
999
  event=create_team_pre_hook_started_event(
955
1000
  from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
956
1001
  ),
1002
+ events_to_skip=self.events_to_skip,
1003
+ store_events=self.store_events,
957
1004
  )
958
1005
  try:
959
1006
  # Filter arguments to only include those that the hook accepts
@@ -965,11 +1012,13 @@ class Team:
965
1012
  # Synchronous function
966
1013
  hook(**filtered_args)
967
1014
 
968
- yield self._handle_event(
1015
+ yield handle_event( # type: ignore
969
1016
  run_response=run_response,
970
1017
  event=create_team_pre_hook_completed_event(
971
1018
  from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
972
1019
  ),
1020
+ events_to_skip=self.events_to_skip,
1021
+ store_events=self.store_events,
973
1022
  )
974
1023
 
975
1024
  except (InputCheckError, OutputCheckError) as e:
@@ -995,7 +1044,7 @@ class Team:
995
1044
  user_id: Optional[str] = None,
996
1045
  debug_mode: Optional[bool] = None,
997
1046
  **kwargs: Any,
998
- ) -> None:
1047
+ ) -> Iterator[TeamRunOutputEvent]:
999
1048
  """Execute multiple post-hook functions in succession."""
1000
1049
  if hooks is None:
1001
1050
  return
@@ -1014,12 +1063,31 @@ class Team:
1014
1063
  all_args.update(kwargs)
1015
1064
 
1016
1065
  for i, hook in enumerate(hooks):
1066
+ yield handle_event( # type: ignore
1067
+ run_response=run_output,
1068
+ event=create_team_post_hook_started_event( # type: ignore
1069
+ from_run_response=run_output,
1070
+ post_hook_name=hook.__name__,
1071
+ ),
1072
+ events_to_skip=self.events_to_skip,
1073
+ store_events=self.store_events,
1074
+ )
1017
1075
  try:
1018
1076
  # Filter arguments to only include those that the hook accepts
1019
1077
  filtered_args = filter_hook_args(hook, all_args)
1020
1078
 
1021
1079
  hook(**filtered_args)
1022
1080
 
1081
+ yield handle_event( # type: ignore
1082
+ run_response=run_output,
1083
+ event=create_team_post_hook_completed_event( # type: ignore
1084
+ from_run_response=run_output,
1085
+ post_hook_name=hook.__name__,
1086
+ ),
1087
+ events_to_skip=self.events_to_skip,
1088
+ store_events=self.store_events,
1089
+ )
1090
+
1023
1091
  except (InputCheckError, OutputCheckError) as e:
1024
1092
  raise e
1025
1093
  except Exception as e:
@@ -1037,7 +1105,7 @@ class Team:
1037
1105
  metadata: Optional[Dict[str, Any]] = None,
1038
1106
  debug_mode: Optional[bool] = None,
1039
1107
  **kwargs: Any,
1040
- ) -> None:
1108
+ ) -> AsyncIterator[TeamRunOutputEvent]:
1041
1109
  """Execute multiple post-hook functions in succession (async version)."""
1042
1110
  if hooks is None:
1043
1111
  return
@@ -1056,6 +1124,15 @@ class Team:
1056
1124
  all_args.update(kwargs)
1057
1125
 
1058
1126
  for i, hook in enumerate(hooks):
1127
+ yield handle_event( # type: ignore
1128
+ run_response=run_output,
1129
+ event=create_team_post_hook_started_event( # type: ignore
1130
+ from_run_response=run_output,
1131
+ post_hook_name=hook.__name__,
1132
+ ),
1133
+ events_to_skip=self.events_to_skip,
1134
+ store_events=self.store_events,
1135
+ )
1059
1136
  try:
1060
1137
  # Filter arguments to only include those that the hook accepts
1061
1138
  filtered_args = filter_hook_args(hook, all_args)
@@ -1065,6 +1142,15 @@ class Team:
1065
1142
  else:
1066
1143
  hook(**filtered_args)
1067
1144
 
1145
+ yield handle_event( # type: ignore
1146
+ run_response=run_output,
1147
+ event=create_team_post_hook_completed_event( # type: ignore
1148
+ from_run_response=run_output,
1149
+ post_hook_name=hook.__name__,
1150
+ ),
1151
+ events_to_skip=self.events_to_skip,
1152
+ store_events=self.store_events,
1153
+ )
1068
1154
  except (InputCheckError, OutputCheckError) as e:
1069
1155
  raise e
1070
1156
  except Exception as e:
@@ -1091,15 +1177,18 @@ class Team:
1091
1177
 
1092
1178
  Steps:
1093
1179
  1. Execute pre-hooks
1094
- 2. Get run messages
1095
- 3. Reason about the task(s) if reasoning is enabled
1096
- 4. Get a response from the model
1097
- 5. Update TeamRunOutput
1098
- 6. Execute post-hooks
1099
- 7. Add RunOutput to Team Session
1100
- 8. Calculate session metrics
1101
- 9. Update Team Memory
1102
- 10. Save session to storage
1180
+ 2. Determine tools for model
1181
+ 3. Prepare run messages
1182
+ 4. Start memory creation in background thread
1183
+ 5. Reason about the task if reasoning is enabled
1184
+ 6. Get a response from the model
1185
+ 7. Update TeamRunOutput with the model response
1186
+ 8. Store media if enabled
1187
+ 9. Convert response to structured format
1188
+ 10. Execute post-hooks
1189
+ 11. Wait for background memory creation
1190
+ 12. Create session summary
1191
+ 13. Cleanup and store (scrub, stop timer, add to session, calculate metrics, save session)
1103
1192
  """
1104
1193
 
1105
1194
  # Register run for cancellation tracking
@@ -1125,6 +1214,7 @@ class Team:
1125
1214
  # Consume the generator without yielding
1126
1215
  deque(pre_hook_iterator, maxlen=0)
1127
1216
 
1217
+ # 2. Determine tools for model
1128
1218
  # Initialize team run context
1129
1219
  team_run_context: Dict[str, Any] = {}
1130
1220
 
@@ -1150,7 +1240,7 @@ class Team:
1150
1240
  metadata=metadata,
1151
1241
  )
1152
1242
 
1153
- # 2. Prepare run messages
1243
+ # 3. Prepare run messages
1154
1244
  run_messages: RunMessages = self._get_run_messages(
1155
1245
  run_response=run_response,
1156
1246
  session=session,
@@ -1174,95 +1264,109 @@ class Team:
1174
1264
 
1175
1265
  log_debug(f"Team Run Start: {run_response.run_id}", center=True)
1176
1266
 
1177
- # 3. Reason about the task(s) if reasoning is enabled
1178
- self._handle_reasoning(run_response=run_response, run_messages=run_messages)
1267
+ # 4. Start memory creation in background thread
1268
+ memory_future = None
1269
+ if run_messages.user_message is not None and self.memory_manager is not None and not self.enable_agentic_memory:
1270
+ log_debug("Starting memory creation in background thread.")
1271
+ memory_future = self.background_executor.submit(
1272
+ self._make_memories, run_messages=run_messages, user_id=user_id
1273
+ )
1179
1274
 
1180
- # Check for cancellation before model call
1181
- raise_if_cancelled(run_response.run_id) # type: ignore
1275
+ try:
1276
+ raise_if_cancelled(run_response.run_id) # type: ignore
1182
1277
 
1183
- # 4. Get the model response for the team leader
1184
- self.model = cast(Model, self.model)
1185
- model_response: ModelResponse = self.model.response(
1186
- messages=run_messages.messages,
1187
- response_format=response_format,
1188
- tools=self._tools_for_model,
1189
- functions=self._functions_for_model,
1190
- tool_choice=self.tool_choice,
1191
- tool_call_limit=self.tool_call_limit,
1192
- send_media_to_model=self.send_media_to_model,
1193
- )
1278
+ # 5. Reason about the task if reasoning is enabled
1279
+ self._handle_reasoning(run_response=run_response, run_messages=run_messages)
1194
1280
 
1195
- # Check for cancellation after model call
1196
- raise_if_cancelled(run_response.run_id) # type: ignore
1281
+ # Check for cancellation before model call
1282
+ raise_if_cancelled(run_response.run_id) # type: ignore
1197
1283
 
1198
- # If an output model is provided, generate output using the output model
1199
- self._parse_response_with_output_model(model_response, run_messages)
1284
+ # 6. Get the model response for the team leader
1285
+ self.model = cast(Model, self.model)
1286
+ model_response: ModelResponse = self.model.response(
1287
+ messages=run_messages.messages,
1288
+ response_format=response_format,
1289
+ tools=self._tools_for_model,
1290
+ functions=self._functions_for_model,
1291
+ tool_choice=self.tool_choice,
1292
+ tool_call_limit=self.tool_call_limit,
1293
+ send_media_to_model=self.send_media_to_model,
1294
+ )
1200
1295
 
1201
- # If a parser model is provided, structure the response separately
1202
- self._parse_response_with_parser_model(model_response, run_messages)
1296
+ # Check for cancellation after model call
1297
+ raise_if_cancelled(run_response.run_id) # type: ignore
1203
1298
 
1204
- # 5. Update TeamRunOutput
1205
- self._update_run_response(model_response=model_response, run_response=run_response, run_messages=run_messages)
1299
+ # If an output model is provided, generate output using the output model
1300
+ self._parse_response_with_output_model(model_response, run_messages)
1206
1301
 
1207
- if self.store_media:
1208
- self._store_media(run_response, model_response)
1209
- else:
1210
- self._scrub_media_from_run_output(run_response)
1211
-
1212
- # Parse team response model
1213
- self._convert_response_to_structured_format(run_response=run_response)
1302
+ # If a parser model is provided, structure the response separately
1303
+ self._parse_response_with_parser_model(model_response, run_messages)
1214
1304
 
1215
- # 6. Execute post-hooks after output is generated but before response is returned
1216
- if self.post_hooks is not None:
1217
- self._execute_post_hooks(
1218
- hooks=self.post_hooks, # type: ignore
1219
- run_output=run_response,
1220
- session=session,
1221
- session_state=session_state,
1222
- dependencies=dependencies,
1223
- metadata=metadata,
1224
- user_id=user_id,
1225
- debug_mode=debug_mode,
1226
- **kwargs,
1305
+ # 7. Update TeamRunOutput with the model response
1306
+ self._update_run_response(
1307
+ model_response=model_response, run_response=run_response, run_messages=run_messages
1227
1308
  )
1228
1309
 
1229
- run_response.status = RunStatus.completed
1310
+ # 8. Store media if enabled
1311
+ if self.store_media:
1312
+ self._store_media(run_response, model_response)
1230
1313
 
1231
- # Set the run duration
1232
- if run_response.metrics:
1233
- run_response.metrics.stop_timer()
1314
+ # 9. Convert response to structured format
1315
+ self._convert_response_to_structured_format(run_response=run_response)
1234
1316
 
1235
- # 7. Add the RunOutput to Team Session
1236
- session.upsert_run(run_response=run_response)
1317
+ # 10. Execute post-hooks after output is generated but before response is returned
1318
+ if self.post_hooks is not None:
1319
+ iterator = self._execute_post_hooks(
1320
+ hooks=self.post_hooks, # type: ignore
1321
+ run_output=run_response,
1322
+ session=session,
1323
+ user_id=user_id,
1324
+ debug_mode=debug_mode,
1325
+ **kwargs,
1326
+ )
1327
+ deque(iterator, maxlen=0)
1328
+ raise_if_cancelled(run_response.run_id) # type: ignore
1237
1329
 
1238
- # 8. Calculate session metrics
1239
- self._update_session_metrics(session=session)
1330
+ # 11. Wait for background memory creation
1331
+ wait_for_background_tasks(memory_future=memory_future)
1240
1332
 
1241
- # 9. Update Team Memory
1242
- response_iterator = self._make_memories_and_summaries(
1243
- run_response=run_response,
1244
- run_messages=run_messages,
1245
- session=session,
1246
- user_id=user_id,
1247
- )
1248
- deque(response_iterator, maxlen=0)
1333
+ raise_if_cancelled(run_response.run_id) # type: ignore
1334
+
1335
+ # 12. Create session summary
1336
+ if self.session_summary_manager is not None:
1337
+ # Upsert the RunOutput to Team Session before creating the session summary
1338
+ session.upsert_run(run_response=run_response)
1339
+ try:
1340
+ self.session_summary_manager.create_session_summary(session=session)
1341
+ except Exception as e:
1342
+ log_warning(f"Error in session summary creation: {str(e)}")
1343
+
1344
+ raise_if_cancelled(run_response.run_id) # type: ignore
1249
1345
 
1250
- # 10. Scrub the stored run based on storage flags
1251
- if self._scrub_run_output_for_storage(run_response):
1252
- session.upsert_run(run_response=run_response)
1346
+ # Set the run status to completed
1347
+ run_response.status = RunStatus.completed
1253
1348
 
1254
- # 11. Save session to storage
1255
- self.save_session(session=session)
1349
+ # 13. Cleanup and store the run response
1350
+ self._cleanup_and_store(run_response=run_response, session=session)
1256
1351
 
1257
- # Log Team Telemetry
1258
- self._log_team_telemetry(session_id=session.session_id, run_id=run_response.run_id)
1352
+ # Log Team Telemetry
1353
+ self._log_team_telemetry(session_id=session.session_id, run_id=run_response.run_id)
1354
+
1355
+ log_debug(f"Team Run End: {run_response.run_id}", center=True, symbol="*")
1259
1356
 
1260
- log_debug(f"Team Run End: {run_response.run_id}", center=True, symbol="*")
1357
+ return run_response
1261
1358
 
1262
- # Always clean up the run tracking
1263
- cleanup_run(run_response.run_id) # type: ignore
1359
+ except RunCancelledException as e:
1360
+ # Handle run cancellation during streaming
1361
+ log_info(f"Team run {run_response.run_id} was cancelled")
1362
+ run_response.status = RunStatus.cancelled
1363
+ run_response.content = str(e)
1264
1364
 
1265
- return run_response
1365
+ # Add the RunOutput to Team Session even when cancelled
1366
+ self._cleanup_and_store(run_response=run_response, session=session)
1367
+ return run_response
1368
+ finally:
1369
+ cleanup_run(run_response.run_id) # type: ignore
1266
1370
 
1267
1371
  def _run_stream(
1268
1372
  self,
@@ -1277,7 +1381,7 @@ class Team:
1277
1381
  metadata: Optional[Dict[str, Any]] = None,
1278
1382
  dependencies: Optional[Dict[str, Any]] = None,
1279
1383
  response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
1280
- stream_intermediate_steps: bool = False,
1384
+ stream_events: bool = False,
1281
1385
  yield_run_response: bool = False,
1282
1386
  debug_mode: Optional[bool] = None,
1283
1387
  **kwargs: Any,
@@ -1286,14 +1390,15 @@ class Team:
1286
1390
 
1287
1391
  Steps:
1288
1392
  1. Execute pre-hooks
1289
- 2. Prepare run messages
1290
- 3. Reason about the task(s) if reasoning is enabled
1291
- 4. Get a response from the model
1292
- 5. Add the run to Team Session
1293
- 6. Update Team Memory
1294
- 7. Create the run completed event
1295
- 8. Calculate session metrics
1296
- 9. Save session to storage
1393
+ 2. Determine tools for model
1394
+ 3. Prepare run messages
1395
+ 4. Start memory creation in background thread
1396
+ 5. Reason about the task if reasoning is enabled
1397
+ 6. Get a response from the model
1398
+ 7. Parse response with parser model if provided
1399
+ 8. Wait for background memory creation
1400
+ 9. Create session summary
1401
+ 10. Cleanup and store (scrub, add to session, calculate metrics, save session)
1297
1402
  """
1298
1403
  # Register run for cancellation tracking
1299
1404
  register_run(run_response.run_id) # type: ignore
@@ -1318,6 +1423,7 @@ class Team:
1318
1423
  for pre_hook_event in pre_hook_iterator:
1319
1424
  yield pre_hook_event
1320
1425
 
1426
+ # 2. Determine tools for model
1321
1427
  # Initialize team run context
1322
1428
  team_run_context: Dict[str, Any] = {}
1323
1429
 
@@ -1343,7 +1449,7 @@ class Team:
1343
1449
  metadata=metadata,
1344
1450
  )
1345
1451
 
1346
- # 2. Prepare run messages
1452
+ # 3. Prepare run messages
1347
1453
  run_messages: RunMessages = self._get_run_messages(
1348
1454
  run_response=run_response,
1349
1455
  session=session,
@@ -1367,28 +1473,44 @@ class Team:
1367
1473
 
1368
1474
  log_debug(f"Team Run Start: {run_response.run_id}", center=True)
1369
1475
 
1476
+ # 4. Start memory creation in background thread
1477
+ memory_future = None
1478
+ if run_messages.user_message is not None and self.memory_manager is not None and not self.enable_agentic_memory:
1479
+ log_debug("Starting memory creation in background thread.")
1480
+ memory_future = self.background_executor.submit(
1481
+ self._make_memories, run_messages=run_messages, user_id=user_id
1482
+ )
1483
+
1370
1484
  try:
1371
1485
  # Start the Run by yielding a RunStarted event
1372
- if stream_intermediate_steps:
1373
- yield self._handle_event(create_team_run_started_event(run_response), run_response)
1486
+ if stream_events:
1487
+ yield handle_event( # type: ignore
1488
+ create_team_run_started_event(run_response),
1489
+ run_response,
1490
+ events_to_skip=self.events_to_skip,
1491
+ store_events=self.store_events,
1492
+ )
1493
+
1494
+ raise_if_cancelled(run_response.run_id) # type: ignore
1374
1495
 
1375
- # 3. Reason about the task(s) if reasoning is enabled
1496
+ # 5. Reason about the task if reasoning is enabled
1376
1497
  yield from self._handle_reasoning_stream(
1377
1498
  run_response=run_response,
1378
1499
  run_messages=run_messages,
1500
+ stream_events=stream_events,
1379
1501
  )
1380
1502
 
1381
1503
  # Check for cancellation before model processing
1382
1504
  raise_if_cancelled(run_response.run_id) # type: ignore
1383
1505
 
1384
- # 4. Get a response from the model
1506
+ # 6. Get a response from the model
1385
1507
  if self.output_model is None:
1386
1508
  for event in self._handle_model_response_stream(
1387
1509
  session=session,
1388
1510
  run_response=run_response,
1389
1511
  run_messages=run_messages,
1390
1512
  response_format=response_format,
1391
- stream_intermediate_steps=stream_intermediate_steps,
1513
+ stream_events=stream_events,
1392
1514
  ):
1393
1515
  raise_if_cancelled(run_response.run_id) # type: ignore
1394
1516
  yield event
@@ -1398,13 +1520,13 @@ class Team:
1398
1520
  run_response=run_response,
1399
1521
  run_messages=run_messages,
1400
1522
  response_format=response_format,
1401
- stream_intermediate_steps=stream_intermediate_steps,
1523
+ stream_events=stream_events,
1402
1524
  ):
1403
1525
  raise_if_cancelled(run_response.run_id) # type: ignore
1404
1526
  from agno.run.team import IntermediateRunContentEvent, RunContentEvent
1405
1527
 
1406
1528
  if isinstance(event, RunContentEvent):
1407
- if stream_intermediate_steps:
1529
+ if stream_events:
1408
1530
  yield IntermediateRunContentEvent(
1409
1531
  content=event.content,
1410
1532
  content_type=event.content_type,
@@ -1416,7 +1538,7 @@ class Team:
1416
1538
  session=session,
1417
1539
  run_response=run_response,
1418
1540
  run_messages=run_messages,
1419
- stream_intermediate_steps=stream_intermediate_steps,
1541
+ stream_events=stream_events,
1420
1542
  ):
1421
1543
  raise_if_cancelled(run_response.run_id) # type: ignore
1422
1544
  yield event
@@ -1424,14 +1546,22 @@ class Team:
1424
1546
  # Check for cancellation after model processing
1425
1547
  raise_if_cancelled(run_response.run_id) # type: ignore
1426
1548
 
1427
- # If a parser model is provided, structure the response separately
1549
+ # 7. Parse response with parser model if provided
1428
1550
  yield from self._parse_response_with_parser_model_stream(
1429
- session=session, run_response=run_response, stream_intermediate_steps=stream_intermediate_steps
1551
+ session=session, run_response=run_response, stream_events=stream_events
1430
1552
  )
1431
1553
 
1554
+ # Yield RunContentCompletedEvent
1555
+ if stream_events:
1556
+ yield handle_event( # type: ignore
1557
+ create_team_run_content_completed_event(from_run_response=run_response),
1558
+ run_response,
1559
+ events_to_skip=self.events_to_skip,
1560
+ store_events=self.store_events,
1561
+ )
1432
1562
  # Execute post-hooks after output is generated but before response is returned
1433
1563
  if self.post_hooks is not None:
1434
- self._execute_post_hooks(
1564
+ yield from self._execute_post_hooks(
1435
1565
  hooks=self.post_hooks, # type: ignore
1436
1566
  run_output=run_response,
1437
1567
  session_state=session_state,
@@ -1442,42 +1572,62 @@ class Team:
1442
1572
  debug_mode=debug_mode,
1443
1573
  **kwargs,
1444
1574
  )
1575
+ raise_if_cancelled(run_response.run_id) # type: ignore
1445
1576
 
1446
- run_response.status = RunStatus.completed
1447
- # Set the run duration
1448
- if run_response.metrics:
1449
- run_response.metrics.stop_timer()
1450
-
1451
- # 5. Add the run to Team Session
1452
- session.upsert_run(run_response=run_response)
1453
-
1454
- # 6. Update Team Memory
1455
- yield from self._make_memories_and_summaries(
1577
+ # 8. Wait for background memory creation
1578
+ yield from wait_for_background_tasks_stream(
1456
1579
  run_response=run_response,
1457
- run_messages=run_messages,
1458
- session=session,
1459
- user_id=user_id,
1580
+ memory_future=memory_future,
1581
+ stream_events=stream_events,
1582
+ events_to_skip=self.events_to_skip, # type: ignore
1583
+ store_events=self.store_events,
1460
1584
  )
1461
1585
 
1462
- # 7. Create the run completed event
1463
- completed_event = self._handle_event(
1586
+ raise_if_cancelled(run_response.run_id) # type: ignore
1587
+ # 9. Create session summary
1588
+ if self.session_summary_manager is not None:
1589
+ # Upsert the RunOutput to Team Session before creating the session summary
1590
+ session.upsert_run(run_response=run_response)
1591
+
1592
+ if stream_events:
1593
+ yield handle_event( # type: ignore
1594
+ create_team_session_summary_started_event(from_run_response=run_response),
1595
+ run_response,
1596
+ events_to_skip=self.events_to_skip,
1597
+ store_events=self.store_events,
1598
+ )
1599
+ try:
1600
+ self.session_summary_manager.create_session_summary(session=session)
1601
+ except Exception as e:
1602
+ log_warning(f"Error in session summary creation: {str(e)}")
1603
+ if stream_events:
1604
+ yield handle_event( # type: ignore
1605
+ create_team_session_summary_completed_event(
1606
+ from_run_response=run_response, session_summary=session.summary
1607
+ ),
1608
+ run_response,
1609
+ events_to_skip=self.events_to_skip,
1610
+ store_events=self.store_events,
1611
+ )
1612
+
1613
+ raise_if_cancelled(run_response.run_id) # type: ignore
1614
+ # Create the run completed event
1615
+ completed_event = handle_event(
1464
1616
  create_team_run_completed_event(
1465
1617
  from_run_response=run_response,
1466
1618
  ),
1467
1619
  run_response,
1620
+ events_to_skip=self.events_to_skip,
1621
+ store_events=self.store_events,
1468
1622
  )
1469
1623
 
1470
- # 8. Calculate session metrics
1471
- self._update_session_metrics(session=session)
1472
-
1473
- # 9. Scrub the stored run based on storage flags
1474
- if self._scrub_run_output_for_storage(run_response):
1475
- session.upsert_run(run_response=run_response)
1624
+ # Set the run status to completed
1625
+ run_response.status = RunStatus.completed
1476
1626
 
1477
- # 10. Save session to storage
1478
- self.save_session(session=session)
1627
+ # 10. Cleanup and store the run response
1628
+ self._cleanup_and_store(run_response=run_response, session=session)
1479
1629
 
1480
- if stream_intermediate_steps:
1630
+ if stream_events:
1481
1631
  yield completed_event
1482
1632
 
1483
1633
  if yield_run_response:
@@ -1495,14 +1645,15 @@ class Team:
1495
1645
  run_response.content = str(e)
1496
1646
 
1497
1647
  # Yield the cancellation event
1498
- yield self._handle_event(
1648
+ yield handle_event( # type: ignore
1499
1649
  create_team_run_cancelled_event(from_run_response=run_response, reason=str(e)),
1500
1650
  run_response,
1651
+ events_to_skip=self.events_to_skip,
1652
+ store_events=self.store_events,
1501
1653
  )
1502
1654
 
1503
1655
  # Add the RunOutput to Team Session even when cancelled
1504
- session.upsert_run(run_response=run_response)
1505
- self.save_session(session=session)
1656
+ self._cleanup_and_store(run_response=run_response, session=session)
1506
1657
  finally:
1507
1658
  # Always clean up the run tracking
1508
1659
  cleanup_run(run_response.run_id) # type: ignore
@@ -1513,6 +1664,7 @@ class Team:
1513
1664
  input: Union[str, List, Dict, Message, BaseModel, List[Message]],
1514
1665
  *,
1515
1666
  stream: Literal[False] = False,
1667
+ stream_events: Optional[bool] = None,
1516
1668
  stream_intermediate_steps: Optional[bool] = None,
1517
1669
  session_id: Optional[str] = None,
1518
1670
  session_state: Optional[Dict[str, Any]] = None,
@@ -1538,6 +1690,7 @@ class Team:
1538
1690
  input: Union[str, List, Dict, Message, BaseModel, List[Message]],
1539
1691
  *,
1540
1692
  stream: Literal[True] = True,
1693
+ stream_events: Optional[bool] = None,
1541
1694
  stream_intermediate_steps: Optional[bool] = None,
1542
1695
  session_id: Optional[str] = None,
1543
1696
  session_state: Optional[Dict[str, Any]] = None,
@@ -1563,6 +1716,7 @@ class Team:
1563
1716
  input: Union[str, List, Dict, Message, BaseModel, List[Message]],
1564
1717
  *,
1565
1718
  stream: Optional[bool] = None,
1719
+ stream_events: Optional[bool] = None,
1566
1720
  stream_intermediate_steps: Optional[bool] = None,
1567
1721
  session_id: Optional[str] = None,
1568
1722
  session_state: Optional[Dict[str, Any]] = None,
@@ -1586,6 +1740,14 @@ class Team:
1586
1740
  if self._has_async_db():
1587
1741
  raise Exception("run() is not supported with an async DB. Please use arun() instead.")
1588
1742
 
1743
+ # Initialize Team
1744
+ self.initialize_team(debug_mode=debug_mode)
1745
+
1746
+ if (add_history_to_context or self.add_history_to_context) and not self.db and not self.parent_team_id:
1747
+ log_warning(
1748
+ "add_history_to_context is True, but no database has been assigned to the team. History will not be added to the context."
1749
+ )
1750
+
1589
1751
  # Create a run_id for this specific run
1590
1752
  run_id = str(uuid4())
1591
1753
 
@@ -1600,12 +1762,7 @@ class Team:
1600
1762
  self.post_hooks = normalize_hooks(self.post_hooks)
1601
1763
  self._hooks_normalised = True
1602
1764
 
1603
- session_id, user_id, session_state = self._initialize_session(
1604
- run_id=run_id, session_id=session_id, user_id=user_id, session_state=session_state
1605
- )
1606
-
1607
- # Initialize Team
1608
- self.initialize_team(debug_mode=debug_mode)
1765
+ session_id, user_id = self._initialize_session(session_id=session_id, user_id=user_id)
1609
1766
 
1610
1767
  image_artifacts, video_artifacts, audio_artifacts, file_artifacts = self._validate_media_object_id(
1611
1768
  images=images, videos=videos, audios=audio, files=files
@@ -1624,6 +1781,10 @@ class Team:
1624
1781
  team_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
1625
1782
  self._update_metadata(session=team_session)
1626
1783
 
1784
+ # Initialize session state
1785
+ session_state = self._initialize_session_state(
1786
+ session_state=session_state or {}, user_id=user_id, session_id=session_id, run_id=run_id
1787
+ )
1627
1788
  # Update session state from DB
1628
1789
  session_state = self._load_session_state(session=team_session, session_state=session_state)
1629
1790
 
@@ -1656,17 +1817,18 @@ class Team:
1656
1817
  if stream is None:
1657
1818
  stream = False if self.stream is None else self.stream
1658
1819
 
1659
- if stream_intermediate_steps is None:
1660
- stream_intermediate_steps = (
1661
- False if self.stream_intermediate_steps is None else self.stream_intermediate_steps
1662
- )
1820
+ # Considering both stream_events and stream_intermediate_steps (deprecated)
1821
+ stream_events = stream_events or stream_intermediate_steps
1663
1822
 
1664
- # Can't have stream_intermediate_steps if stream is False
1823
+ # Can't stream events if streaming is disabled
1665
1824
  if stream is False:
1666
- stream_intermediate_steps = False
1825
+ stream_events = False
1826
+
1827
+ if stream_events is None:
1828
+ stream_events = False if self.stream_events is None else self.stream_events
1667
1829
 
1668
1830
  self.stream = self.stream or stream
1669
- self.stream_intermediate_steps = self.stream_intermediate_steps or (stream_intermediate_steps and self.stream)
1831
+ self.stream_events = self.stream_events or stream_events
1670
1832
 
1671
1833
  # Configure the model for runs
1672
1834
  response_format: Optional[Union[Dict, Type[BaseModel]]] = (
@@ -1722,7 +1884,7 @@ class Team:
1722
1884
  metadata=metadata,
1723
1885
  dependencies=run_dependencies,
1724
1886
  response_format=response_format,
1725
- stream_intermediate_steps=stream_intermediate_steps,
1887
+ stream_events=stream_events,
1726
1888
  yield_run_response=yield_run_response,
1727
1889
  debug_mode=debug_mode,
1728
1890
  **kwargs,
@@ -1761,17 +1923,6 @@ class Team:
1761
1923
  else:
1762
1924
  delay = self.delay_between_retries
1763
1925
  time.sleep(delay)
1764
- except RunCancelledException as e:
1765
- # Handle run cancellation
1766
- log_info(f"Team run {run_response.run_id} was cancelled")
1767
- run_response.content = str(e)
1768
- run_response.status = RunStatus.cancelled
1769
-
1770
- # Add the RunOutput to Team Session even when cancelled
1771
- team_session.upsert_run(run_response=run_response)
1772
- self.save_session(session=team_session)
1773
-
1774
- return run_response
1775
1926
  except KeyboardInterrupt:
1776
1927
  run_response.content = "Operation cancelled by user"
1777
1928
  run_response.status = RunStatus.cancelled
@@ -1812,10 +1963,6 @@ class Team:
1812
1963
  add_history_to_context: Optional[bool] = None,
1813
1964
  knowledge_filters: Optional[Dict[str, Any]] = None,
1814
1965
  metadata: Optional[Dict[str, Any]] = None,
1815
- audio: Optional[Sequence[Audio]] = None,
1816
- images: Optional[Sequence[Image]] = None,
1817
- videos: Optional[Sequence[Video]] = None,
1818
- files: Optional[Sequence[File]] = None,
1819
1966
  debug_mode: Optional[bool] = None,
1820
1967
  dependencies: Optional[Dict[str, Any]] = None,
1821
1968
  **kwargs: Any,
@@ -1828,16 +1975,16 @@ class Team:
1828
1975
  3. Execute pre-hooks
1829
1976
  4. Determine tools for model
1830
1977
  5. Prepare run messages
1831
- 6. Reason about the task if reasoning is enabled
1832
- 7. Get a response from the Model (includes running function calls)
1833
- 8. Update TeamRunOutput
1834
- 9. Add the run to memory
1835
- 10. Calculate session metrics
1836
- 11. Parse team response model
1837
- 12. Update Team Memory
1838
- 13. Scrub the stored run if needed
1839
- 14. Save session to storage
1840
- 15. Execute post-hooks
1978
+ 6. Start memory creation in background task
1979
+ 7. Reason about the task if reasoning is enabled
1980
+ 8. Get a response from the Model
1981
+ 9. Update TeamRunOutput with the model response
1982
+ 10. Store media if enabled
1983
+ 11. Convert response to structured format
1984
+ 12. Execute post-hooks
1985
+ 13. Wait for background memory creation
1986
+ 14. Create session summary
1987
+ 15. Cleanup and store (scrub, add to session, calculate metrics, save session)
1841
1988
  """
1842
1989
  log_debug(f"Team Run Start: {run_response.run_id}", center=True)
1843
1990
 
@@ -1854,6 +2001,11 @@ class Team:
1854
2001
 
1855
2002
  # 2. Update metadata and session state
1856
2003
  self._update_metadata(session=team_session)
2004
+ # Initialize session state
2005
+ session_state = self._initialize_session_state(
2006
+ session_state=session_state or {}, user_id=user_id, session_id=session_id, run_id=run_response.run_id
2007
+ )
2008
+ # Update session state from DB
1857
2009
  session_state = self._load_session_state(session=team_session, session_state=session_state) # type: ignore
1858
2010
 
1859
2011
  run_input = cast(TeamRunInput, run_response.input)
@@ -1909,10 +2061,10 @@ class Team:
1909
2061
  session_state=session_state,
1910
2062
  user_id=user_id,
1911
2063
  input_message=run_input.input_content,
1912
- audio=audio,
1913
- images=images,
1914
- videos=videos,
1915
- files=files,
2064
+ audio=run_input.audios,
2065
+ images=run_input.images,
2066
+ videos=run_input.videos,
2067
+ files=run_input.files,
1916
2068
  knowledge_filters=knowledge_filters,
1917
2069
  add_history_to_context=add_history_to_context,
1918
2070
  dependencies=dependencies,
@@ -1921,98 +2073,119 @@ class Team:
1921
2073
  **kwargs,
1922
2074
  )
1923
2075
 
2076
+ self.model = cast(Model, self.model)
2077
+ log_debug(f"Team Run Start: {run_response.run_id}", center=True)
2078
+
2079
+ # 6. Start memory creation in background task
2080
+ import asyncio
2081
+
2082
+ memory_task = None
2083
+ if run_messages.user_message is not None and self.memory_manager is not None and not self.enable_agentic_memory:
2084
+ log_debug("Starting memory creation in background task.")
2085
+ memory_task = asyncio.create_task(self._amake_memories(run_messages=run_messages, user_id=user_id))
2086
+
1924
2087
  # Register run for cancellation tracking
1925
2088
  register_run(run_response.run_id) # type: ignore
1926
2089
 
1927
- # 6. Reason about the task(s) if reasoning is enabled
1928
- await self._ahandle_reasoning(run_response=run_response, run_messages=run_messages)
2090
+ try:
2091
+ raise_if_cancelled(run_response.run_id) # type: ignore
2092
+ # 7. Reason about the task if reasoning is enabled
2093
+ await self._ahandle_reasoning(run_response=run_response, run_messages=run_messages)
1929
2094
 
1930
- # Check for cancellation before model call
1931
- raise_if_cancelled(run_response.run_id) # type: ignore
2095
+ # Check for cancellation before model call
2096
+ raise_if_cancelled(run_response.run_id) # type: ignore
1932
2097
 
1933
- # 7. Get the model response for the team leader
1934
- self.model = cast(Model, self.model)
1935
- model_response = await self.model.aresponse(
1936
- messages=run_messages.messages,
1937
- tools=self._tools_for_model,
1938
- functions=self._functions_for_model,
1939
- tool_choice=self.tool_choice,
1940
- tool_call_limit=self.tool_call_limit,
1941
- response_format=response_format,
1942
- send_media_to_model=self.send_media_to_model,
1943
- ) # type: ignore
1944
- raise_if_cancelled(run_response.run_id) # type: ignore
2098
+ # 8. Get the model response for the team leader
2099
+ model_response = await self.model.aresponse(
2100
+ messages=run_messages.messages,
2101
+ tools=self._tools_for_model,
2102
+ functions=self._functions_for_model,
2103
+ tool_choice=self.tool_choice,
2104
+ tool_call_limit=self.tool_call_limit,
2105
+ response_format=response_format,
2106
+ send_media_to_model=self.send_media_to_model,
2107
+ ) # type: ignore
2108
+
2109
+ # Check for cancellation after model call
2110
+ raise_if_cancelled(run_response.run_id) # type: ignore
1945
2111
 
1946
- # If an output model is provided, generate output using the output model
1947
- await self._agenerate_response_with_output_model(model_response=model_response, run_messages=run_messages)
1948
- # If a parser model is provided, structure the response separately
1949
- await self._aparse_response_with_parser_model(model_response=model_response, run_messages=run_messages)
2112
+ # If an output model is provided, generate output using the output model
2113
+ await self._agenerate_response_with_output_model(model_response=model_response, run_messages=run_messages)
1950
2114
 
1951
- # 8. Update TeamRunOutput
1952
- self._update_run_response(model_response=model_response, run_response=run_response, run_messages=run_messages)
2115
+ # If a parser model is provided, structure the response separately
2116
+ await self._aparse_response_with_parser_model(model_response=model_response, run_messages=run_messages)
1953
2117
 
1954
- # Optional: Store media
1955
- if self.store_media:
1956
- self._store_media(run_response, model_response)
1957
- else:
1958
- self._scrub_media_from_run_output(run_response)
2118
+ # 9. Update TeamRunOutput with the model response
2119
+ self._update_run_response(
2120
+ model_response=model_response, run_response=run_response, run_messages=run_messages
2121
+ )
1959
2122
 
1960
- # 11. Parse team response model
1961
- self._convert_response_to_structured_format(run_response=run_response)
2123
+ # 10. Store media if enabled
2124
+ if self.store_media:
2125
+ self._store_media(run_response, model_response)
1962
2126
 
1963
- # Execute post-hooks after output is generated but before response is returned
1964
- if self.post_hooks is not None:
1965
- await self._aexecute_post_hooks(
1966
- hooks=self.post_hooks, # type: ignore
1967
- run_output=run_response,
1968
- session=team_session,
1969
- user_id=user_id,
1970
- debug_mode=debug_mode,
1971
- session_state=session_state,
1972
- dependencies=dependencies,
1973
- metadata=metadata,
1974
- **kwargs,
1975
- )
2127
+ # 11. Convert response to structured format
2128
+ self._convert_response_to_structured_format(run_response=run_response)
1976
2129
 
1977
- run_response.status = RunStatus.completed
2130
+ # 12. Execute post-hooks after output is generated but before response is returned
2131
+ if self.post_hooks is not None:
2132
+ async for _ in self._aexecute_post_hooks(
2133
+ hooks=self.post_hooks, # type: ignore
2134
+ run_output=run_response,
2135
+ session=team_session,
2136
+ user_id=user_id,
2137
+ debug_mode=debug_mode,
2138
+ **kwargs,
2139
+ ):
2140
+ pass
1978
2141
 
1979
- # Set the run duration
1980
- if run_response.metrics:
1981
- run_response.metrics.stop_timer()
2142
+ raise_if_cancelled(run_response.run_id) # type: ignore
1982
2143
 
1983
- # 9. Add the run to memory
1984
- team_session.upsert_run(run_response=run_response)
2144
+ # 13. Wait for background memory creation
2145
+ await await_for_background_tasks(memory_task=memory_task)
1985
2146
 
1986
- # 10. Calculate session metrics
1987
- self._update_session_metrics(session=team_session)
2147
+ raise_if_cancelled(run_response.run_id) # type: ignore
2148
+ # 14. Create session summary
2149
+ if self.session_summary_manager is not None:
2150
+ # Upsert the RunOutput to Team Session before creating the session summary
2151
+ team_session.upsert_run(run_response=run_response)
2152
+ try:
2153
+ await self.session_summary_manager.acreate_session_summary(session=team_session)
2154
+ except Exception as e:
2155
+ log_warning(f"Error in session summary creation: {str(e)}")
1988
2156
 
1989
- # 12. Update Team Memory
1990
- async for _ in self._amake_memories_and_summaries(
1991
- run_response=run_response,
1992
- session=team_session,
1993
- run_messages=run_messages,
1994
- user_id=user_id,
1995
- ):
1996
- pass
2157
+ raise_if_cancelled(run_response.run_id) # type: ignore
2158
+ run_response.status = RunStatus.completed
1997
2159
 
1998
- # 13. Scrub the stored run based on storage flags
1999
- if self._scrub_run_output_for_storage(run_response):
2000
- team_session.upsert_run(run_response=run_response)
2160
+ # 15. Cleanup and store the run response and session
2161
+ await self._acleanup_and_store(run_response=run_response, session=team_session)
2001
2162
 
2002
- # 14. Save session to storage
2003
- if self._has_async_db():
2004
- await self.asave_session(session=team_session)
2005
- else:
2006
- self.save_session(session=team_session)
2163
+ # Log Team Telemetry
2164
+ await self._alog_team_telemetry(session_id=team_session.session_id, run_id=run_response.run_id)
2007
2165
 
2008
- # Log Team Telemetry
2009
- await self._alog_team_telemetry(session_id=team_session.session_id, run_id=run_response.run_id)
2166
+ log_debug(f"Team Run End: {run_response.run_id}", center=True, symbol="*")
2010
2167
 
2011
- log_debug(f"Team Run End: {run_response.run_id}", center=True, symbol="*")
2168
+ return run_response
2169
+ except RunCancelledException as e:
2170
+ # Handle run cancellation
2171
+ log_info(f"Run {run_response.run_id} was cancelled")
2172
+ run_response.content = str(e)
2173
+ run_response.status = RunStatus.cancelled
2012
2174
 
2013
- cleanup_run(run_response.run_id) # type: ignore
2175
+ # Cleanup and store the run response and session
2176
+ await self._acleanup_and_store(run_response=run_response, session=team_session)
2014
2177
 
2015
- return run_response
2178
+ return run_response
2179
+ finally:
2180
+ # Cancel the memory task if it's still running
2181
+ if memory_task is not None and not memory_task.done():
2182
+ memory_task.cancel()
2183
+ try:
2184
+ await memory_task
2185
+ except asyncio.CancelledError:
2186
+ pass
2187
+ # Always clean up the run tracking
2188
+ cleanup_run(run_response.run_id) # type: ignore
2016
2189
 
2017
2190
  async def _arun_stream(
2018
2191
  self,
@@ -2021,6 +2194,7 @@ class Team:
2021
2194
  session_state: Optional[Dict[str, Any]] = None,
2022
2195
  user_id: Optional[str] = None,
2023
2196
  response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
2197
+ stream_events: bool = False,
2024
2198
  stream_intermediate_steps: bool = False,
2025
2199
  yield_run_response: bool = False,
2026
2200
  add_dependencies_to_context: Optional[bool] = None,
@@ -2028,10 +2202,6 @@ class Team:
2028
2202
  add_history_to_context: Optional[bool] = None,
2029
2203
  knowledge_filters: Optional[Dict[str, Any]] = None,
2030
2204
  metadata: Optional[Dict[str, Any]] = None,
2031
- audio: Optional[Sequence[Audio]] = None,
2032
- images: Optional[Sequence[Image]] = None,
2033
- videos: Optional[Sequence[Video]] = None,
2034
- files: Optional[Sequence[File]] = None,
2035
2205
  debug_mode: Optional[bool] = None,
2036
2206
  dependencies: Optional[Dict[str, Any]] = None,
2037
2207
  **kwargs: Any,
@@ -2045,15 +2215,13 @@ class Team:
2045
2215
  4. Execute pre-hooks
2046
2216
  5. Determine tools for model
2047
2217
  6. Prepare run messages
2048
- 7. Yield the run started event
2049
- 8. Reason about the task(s) if reasoning is enabled
2218
+ 7. Start memory creation in background task
2219
+ 8. Reason about the task if reasoning is enabled
2050
2220
  9. Get a response from the model
2051
- 10. Add the run to memory
2052
- 11. Update Team Memory
2053
- 12. Calculate session metrics
2054
- 13. Create the run completed event
2055
- 14. Scrub the stored run if needed
2056
- 15. Save session to storage
2221
+ 10. Parse response with parser model if provided
2222
+ 11. Wait for background memory creation
2223
+ 12. Create session summary
2224
+ 13. Cleanup and store (scrub, add to session, calculate metrics, save session)
2057
2225
  """
2058
2226
 
2059
2227
  # 1. Resolve dependencies
@@ -2068,6 +2236,11 @@ class Team:
2068
2236
 
2069
2237
  # 3. Update metadata and session state
2070
2238
  self._update_metadata(session=team_session)
2239
+ # Initialize session state
2240
+ session_state = self._initialize_session_state(
2241
+ session_state=session_state or {}, user_id=user_id, session_id=session_id, run_id=run_response.run_id
2242
+ )
2243
+ # Update session state from DB
2071
2244
  session_state = self._load_session_state(session=team_session, session_state=session_state) # type: ignore
2072
2245
 
2073
2246
  # 4. Execute pre-hooks
@@ -2102,10 +2275,10 @@ class Team:
2102
2275
  async_mode=True,
2103
2276
  knowledge_filters=knowledge_filters,
2104
2277
  input_message=run_input.input_content,
2105
- images=images,
2106
- videos=videos,
2107
- audio=audio,
2108
- files=files,
2278
+ images=run_input.images,
2279
+ videos=run_input.videos,
2280
+ audio=run_input.audios,
2281
+ files=run_input.files,
2109
2282
  debug_mode=debug_mode,
2110
2283
  add_history_to_context=add_history_to_context,
2111
2284
  dependencies=dependencies,
@@ -2119,10 +2292,10 @@ class Team:
2119
2292
  session_state=session_state,
2120
2293
  user_id=user_id,
2121
2294
  input_message=run_input.input_content,
2122
- audio=audio,
2123
- images=images,
2124
- videos=videos,
2125
- files=files,
2295
+ audio=run_input.audios,
2296
+ images=run_input.images,
2297
+ videos=run_input.videos,
2298
+ files=run_input.files,
2126
2299
  knowledge_filters=knowledge_filters,
2127
2300
  add_history_to_context=add_history_to_context,
2128
2301
  dependencies=dependencies,
@@ -2134,16 +2307,36 @@ class Team:
2134
2307
 
2135
2308
  log_debug(f"Team Run Start: {run_response.run_id}", center=True)
2136
2309
 
2310
+ # 7. Start memory creation in background task
2311
+ import asyncio
2312
+
2313
+ memory_task = None
2314
+ if run_messages.user_message is not None and self.memory_manager is not None and not self.enable_agentic_memory:
2315
+ log_debug("Starting memory creation in background task.")
2316
+ memory_task = asyncio.create_task(self._amake_memories(run_messages=run_messages, user_id=user_id))
2317
+
2137
2318
  # Register run for cancellation tracking
2138
2319
  register_run(run_response.run_id) # type: ignore
2139
2320
 
2140
2321
  try:
2141
- # 7. Yield the run started event
2142
- if stream_intermediate_steps:
2143
- yield self._handle_event(create_team_run_started_event(from_run_response=run_response), run_response)
2322
+ # Considering both stream_events and stream_intermediate_steps (deprecated)
2323
+ stream_events = stream_events or stream_intermediate_steps
2324
+
2325
+ # Yield the run started event
2326
+ if stream_events:
2327
+ yield handle_event( # type: ignore
2328
+ create_team_run_started_event(from_run_response=run_response),
2329
+ run_response,
2330
+ events_to_skip=self.events_to_skip,
2331
+ store_events=self.store_events,
2332
+ )
2144
2333
 
2145
- # 8. Reason about the task(s) if reasoning is enabled
2146
- async for item in self._ahandle_reasoning_stream(run_response=run_response, run_messages=run_messages):
2334
+ # 8. Reason about the task if reasoning is enabled
2335
+ async for item in self._ahandle_reasoning_stream(
2336
+ run_response=run_response,
2337
+ run_messages=run_messages,
2338
+ stream_events=stream_events,
2339
+ ):
2147
2340
  raise_if_cancelled(run_response.run_id) # type: ignore
2148
2341
  yield item
2149
2342
 
@@ -2157,7 +2350,7 @@ class Team:
2157
2350
  run_response=run_response,
2158
2351
  run_messages=run_messages,
2159
2352
  response_format=response_format,
2160
- stream_intermediate_steps=stream_intermediate_steps,
2353
+ stream_events=stream_events,
2161
2354
  ):
2162
2355
  raise_if_cancelled(run_response.run_id) # type: ignore
2163
2356
  yield event
@@ -2167,13 +2360,13 @@ class Team:
2167
2360
  run_response=run_response,
2168
2361
  run_messages=run_messages,
2169
2362
  response_format=response_format,
2170
- stream_intermediate_steps=stream_intermediate_steps,
2363
+ stream_events=stream_events,
2171
2364
  ):
2172
2365
  raise_if_cancelled(run_response.run_id) # type: ignore
2173
2366
  from agno.run.team import IntermediateRunContentEvent, RunContentEvent
2174
2367
 
2175
2368
  if isinstance(event, RunContentEvent):
2176
- if stream_intermediate_steps:
2369
+ if stream_events:
2177
2370
  yield IntermediateRunContentEvent(
2178
2371
  content=event.content,
2179
2372
  content_type=event.content_type,
@@ -2185,7 +2378,7 @@ class Team:
2185
2378
  session=team_session,
2186
2379
  run_response=run_response,
2187
2380
  run_messages=run_messages,
2188
- stream_intermediate_steps=stream_intermediate_steps,
2381
+ stream_events=stream_events,
2189
2382
  ):
2190
2383
  raise_if_cancelled(run_response.run_id) # type: ignore
2191
2384
  yield event
@@ -2193,15 +2386,24 @@ class Team:
2193
2386
  # Check for cancellation after model processing
2194
2387
  raise_if_cancelled(run_response.run_id) # type: ignore
2195
2388
 
2196
- # If a parser model is provided, structure the response separately
2389
+ # 10. Parse response with parser model if provided
2197
2390
  async for event in self._aparse_response_with_parser_model_stream(
2198
- session=team_session, run_response=run_response, stream_intermediate_steps=stream_intermediate_steps
2391
+ session=team_session, run_response=run_response, stream_events=stream_events
2199
2392
  ):
2200
2393
  yield event
2201
2394
 
2395
+ # Yield RunContentCompletedEvent
2396
+ if stream_events:
2397
+ yield handle_event( # type: ignore
2398
+ create_team_run_content_completed_event(from_run_response=run_response),
2399
+ run_response,
2400
+ events_to_skip=self.events_to_skip,
2401
+ store_events=self.store_events,
2402
+ )
2403
+
2202
2404
  # Execute post-hooks after output is generated but before response is returned
2203
2405
  if self.post_hooks is not None:
2204
- await self._aexecute_post_hooks(
2406
+ async for event in self._aexecute_post_hooks(
2205
2407
  hooks=self.post_hooks, # type: ignore
2206
2408
  run_output=run_response,
2207
2409
  session_state=session_state,
@@ -2211,45 +2413,65 @@ class Team:
2211
2413
  user_id=user_id,
2212
2414
  debug_mode=debug_mode,
2213
2415
  **kwargs,
2214
- )
2215
-
2216
- # Set the run duration
2217
- if run_response.metrics:
2218
- run_response.metrics.stop_timer()
2219
-
2220
- run_response.status = RunStatus.completed
2221
-
2222
- # 10. Add the run to memory
2223
- team_session.upsert_run(run_response=run_response)
2416
+ ):
2417
+ yield event
2224
2418
 
2225
- # 11. Update Team Memory
2226
- async for event in self._amake_memories_and_summaries(
2419
+ raise_if_cancelled(run_response.run_id) # type: ignore
2420
+ # 11. Wait for background memory creation
2421
+ async for event in await_for_background_tasks_stream(
2227
2422
  run_response=run_response,
2228
- session=team_session,
2229
- run_messages=run_messages,
2230
- user_id=user_id,
2423
+ memory_task=memory_task,
2424
+ stream_events=stream_events,
2425
+ events_to_skip=self.events_to_skip, # type: ignore
2426
+ store_events=self.store_events,
2231
2427
  ):
2232
2428
  yield event
2233
2429
 
2234
- # 12. Calculate session metrics
2235
- self._update_session_metrics(session=team_session)
2430
+ raise_if_cancelled(run_response.run_id) # type: ignore
2431
+
2432
+ # 12. Create session summary
2433
+ if self.session_summary_manager is not None:
2434
+ # Upsert the RunOutput to Team Session before creating the session summary
2435
+ team_session.upsert_run(run_response=run_response)
2436
+
2437
+ if stream_events:
2438
+ yield handle_event( # type: ignore
2439
+ create_team_session_summary_started_event(from_run_response=run_response),
2440
+ run_response,
2441
+ events_to_skip=self.events_to_skip,
2442
+ store_events=self.store_events,
2443
+ )
2444
+ try:
2445
+ await self.session_summary_manager.acreate_session_summary(session=team_session)
2446
+ except Exception as e:
2447
+ log_warning(f"Error in session summary creation: {str(e)}")
2448
+ if stream_events:
2449
+ yield handle_event( # type: ignore
2450
+ create_team_session_summary_completed_event(
2451
+ from_run_response=run_response, session_summary=team_session.summary
2452
+ ),
2453
+ run_response,
2454
+ events_to_skip=self.events_to_skip,
2455
+ store_events=self.store_events,
2456
+ )
2457
+
2458
+ raise_if_cancelled(run_response.run_id) # type: ignore
2236
2459
 
2237
- # 13. Create the run completed event
2238
- completed_event = self._handle_event(
2239
- create_team_run_completed_event(from_run_response=run_response), run_response
2460
+ # Create the run completed event
2461
+ completed_event = handle_event(
2462
+ create_team_run_completed_event(from_run_response=run_response),
2463
+ run_response,
2464
+ events_to_skip=self.events_to_skip,
2465
+ store_events=self.store_events,
2240
2466
  )
2241
2467
 
2242
- # 14. Scrub the stored run based on storage flags
2243
- if self._scrub_run_output_for_storage(run_response):
2244
- team_session.upsert_run(run_response=run_response)
2468
+ # Set the run status to completed
2469
+ run_response.status = RunStatus.completed
2245
2470
 
2246
- # 15. Save the session to storage
2247
- if self._has_async_db():
2248
- await self.asave_session(session=team_session)
2249
- else:
2250
- self.save_session(session=team_session)
2471
+ # 13. Cleanup and store the run response and session
2472
+ await self._acleanup_and_store(run_response=run_response, session=team_session)
2251
2473
 
2252
- if stream_intermediate_steps:
2474
+ if stream_events:
2253
2475
  yield completed_event
2254
2476
 
2255
2477
  if yield_run_response:
@@ -2267,18 +2489,24 @@ class Team:
2267
2489
  run_response.content = str(e)
2268
2490
 
2269
2491
  # Yield the cancellation event
2270
- yield self._handle_event(
2492
+ yield handle_event( # type: ignore
2271
2493
  create_team_run_cancelled_event(from_run_response=run_response, reason=str(e)),
2272
2494
  run_response,
2495
+ events_to_skip=self.events_to_skip,
2496
+ store_events=self.store_events,
2273
2497
  )
2274
2498
 
2275
- # Add the RunOutput to Team Session even when cancelled
2276
- team_session.upsert_run(run_response=run_response)
2277
- if self._has_async_db():
2278
- await self.asave_session(session=team_session)
2279
- else:
2280
- self.save_session(session=team_session)
2499
+ # Cleanup and store the run response and session
2500
+ await self._acleanup_and_store(run_response=run_response, session=team_session)
2501
+
2281
2502
  finally:
2503
+ # Cancel the memory task if it's still running
2504
+ if memory_task is not None and not memory_task.done():
2505
+ memory_task.cancel()
2506
+ try:
2507
+ await memory_task
2508
+ except asyncio.CancelledError:
2509
+ pass
2282
2510
  # Always clean up the run tracking
2283
2511
  cleanup_run(run_response.run_id) # type: ignore
2284
2512
 
@@ -2288,6 +2516,7 @@ class Team:
2288
2516
  input: Union[str, List, Dict, Message, BaseModel],
2289
2517
  *,
2290
2518
  stream: Literal[False] = False,
2519
+ stream_events: Optional[bool] = None,
2291
2520
  stream_intermediate_steps: Optional[bool] = None,
2292
2521
  session_id: Optional[str] = None,
2293
2522
  session_state: Optional[Dict[str, Any]] = None,
@@ -2313,6 +2542,7 @@ class Team:
2313
2542
  input: Union[str, List, Dict, Message, BaseModel],
2314
2543
  *,
2315
2544
  stream: Literal[True] = True,
2545
+ stream_events: Optional[bool] = None,
2316
2546
  stream_intermediate_steps: Optional[bool] = None,
2317
2547
  session_id: Optional[str] = None,
2318
2548
  session_state: Optional[Dict[str, Any]] = None,
@@ -2338,6 +2568,7 @@ class Team:
2338
2568
  input: Union[str, List, Dict, Message, BaseModel],
2339
2569
  *,
2340
2570
  stream: Optional[bool] = None,
2571
+ stream_events: Optional[bool] = None,
2341
2572
  stream_intermediate_steps: Optional[bool] = None,
2342
2573
  session_id: Optional[str] = None,
2343
2574
  session_state: Optional[Dict[str, Any]] = None,
@@ -2359,6 +2590,11 @@ class Team:
2359
2590
  ) -> Union[TeamRunOutput, AsyncIterator[Union[RunOutputEvent, TeamRunOutputEvent]]]:
2360
2591
  """Run the Team asynchronously and return the response."""
2361
2592
 
2593
+ if (add_history_to_context or self.add_history_to_context) and not self.db and not self.parent_team_id:
2594
+ log_warning(
2595
+ "add_history_to_context is True, but no database has been assigned to the team. History will not be added to the context."
2596
+ )
2597
+
2362
2598
  # Create a run_id for this specific run
2363
2599
  run_id = str(uuid4())
2364
2600
 
@@ -2373,9 +2609,7 @@ class Team:
2373
2609
  self.post_hooks = normalize_hooks(self.post_hooks, async_mode=True)
2374
2610
  self._hooks_normalised = True
2375
2611
 
2376
- session_id, user_id, session_state = self._initialize_session(
2377
- run_id=run_id, session_id=session_id, user_id=user_id, session_state=session_state
2378
- )
2612
+ session_id, user_id = self._initialize_session(session_id=session_id, user_id=user_id)
2379
2613
 
2380
2614
  # Initialize Team
2381
2615
  self.initialize_team(debug_mode=debug_mode)
@@ -2409,17 +2643,18 @@ class Team:
2409
2643
  if stream is None:
2410
2644
  stream = False if self.stream is None else self.stream
2411
2645
 
2412
- if stream_intermediate_steps is None:
2413
- stream_intermediate_steps = (
2414
- False if self.stream_intermediate_steps is None else self.stream_intermediate_steps
2415
- )
2646
+ # Considering both stream_events and stream_intermediate_steps (deprecated)
2647
+ stream_events = stream_events or stream_intermediate_steps
2416
2648
 
2417
- # Can't have stream_intermediate_steps if stream is False
2649
+ # Can't stream events if streaming is disabled
2418
2650
  if stream is False:
2419
- stream_intermediate_steps = False
2651
+ stream_events = False
2652
+
2653
+ if stream_events is None:
2654
+ stream_events = False if self.stream_events is None else self.stream_events
2420
2655
 
2421
2656
  self.stream = self.stream or stream
2422
- self.stream_intermediate_steps = self.stream_intermediate_steps or (stream_intermediate_steps and self.stream)
2657
+ self.stream_events = self.stream_events or stream_events
2423
2658
 
2424
2659
  # Configure the model for runs
2425
2660
  response_format: Optional[Union[Dict, Type[BaseModel]]] = (
@@ -2480,7 +2715,7 @@ class Team:
2480
2715
  metadata=metadata,
2481
2716
  response_format=response_format,
2482
2717
  dependencies=run_dependencies,
2483
- stream_intermediate_steps=stream_intermediate_steps,
2718
+ stream_events=stream_events,
2484
2719
  yield_run_response=yield_run_response,
2485
2720
  debug_mode=debug_mode,
2486
2721
  **kwargs,
@@ -2493,10 +2728,6 @@ class Team:
2493
2728
  session_id=session_id,
2494
2729
  session_state=session_state,
2495
2730
  user_id=user_id,
2496
- audio=audio,
2497
- images=images,
2498
- videos=videos,
2499
- files=files,
2500
2731
  knowledge_filters=effective_filters,
2501
2732
  add_history_to_context=add_history,
2502
2733
  add_dependencies_to_context=add_dependencies,
@@ -2631,7 +2862,7 @@ class Team:
2631
2862
  run_response: TeamRunOutput,
2632
2863
  run_messages: RunMessages,
2633
2864
  response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
2634
- stream_intermediate_steps: bool = False,
2865
+ stream_events: bool = False,
2635
2866
  ) -> Iterator[Union[TeamRunOutputEvent, RunOutputEvent]]:
2636
2867
  self.model = cast(Model, self.model)
2637
2868
 
@@ -2662,7 +2893,7 @@ class Team:
2662
2893
  full_model_response=full_model_response,
2663
2894
  model_response_event=model_response_event,
2664
2895
  reasoning_state=reasoning_state,
2665
- stream_intermediate_steps=stream_intermediate_steps,
2896
+ stream_events=stream_events,
2666
2897
  parse_structured_output=self.should_parse_structured_output,
2667
2898
  )
2668
2899
 
@@ -2678,20 +2909,22 @@ class Team:
2678
2909
  if full_model_response.provider_data is not None:
2679
2910
  run_response.model_provider_data = full_model_response.provider_data
2680
2911
 
2681
- if stream_intermediate_steps and reasoning_state["reasoning_started"]:
2912
+ if stream_events and reasoning_state["reasoning_started"]:
2682
2913
  all_reasoning_steps: List[ReasoningStep] = []
2683
2914
  if run_response.reasoning_steps:
2684
2915
  all_reasoning_steps = cast(List[ReasoningStep], run_response.reasoning_steps)
2685
2916
 
2686
2917
  if all_reasoning_steps:
2687
2918
  add_reasoning_metrics_to_metadata(run_response, reasoning_state["reasoning_time_taken"])
2688
- yield self._handle_event(
2919
+ yield handle_event( # type: ignore
2689
2920
  create_team_reasoning_completed_event(
2690
2921
  from_run_response=run_response,
2691
2922
  content=ReasoningSteps(reasoning_steps=all_reasoning_steps),
2692
2923
  content_type=ReasoningSteps.__name__,
2693
2924
  ),
2694
2925
  run_response,
2926
+ events_to_skip=self.events_to_skip,
2927
+ store_events=self.store_events,
2695
2928
  )
2696
2929
 
2697
2930
  # Build a list of messages that should be added to the RunOutput
@@ -2711,7 +2944,7 @@ class Team:
2711
2944
  run_response: TeamRunOutput,
2712
2945
  run_messages: RunMessages,
2713
2946
  response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
2714
- stream_intermediate_steps: bool = False,
2947
+ stream_events: bool = False,
2715
2948
  ) -> AsyncIterator[Union[TeamRunOutputEvent, RunOutputEvent]]:
2716
2949
  self.model = cast(Model, self.model)
2717
2950
 
@@ -2743,7 +2976,7 @@ class Team:
2743
2976
  full_model_response=full_model_response,
2744
2977
  model_response_event=model_response_event,
2745
2978
  reasoning_state=reasoning_state,
2746
- stream_intermediate_steps=stream_intermediate_steps,
2979
+ stream_events=stream_events,
2747
2980
  parse_structured_output=self.should_parse_structured_output,
2748
2981
  ):
2749
2982
  yield event
@@ -2772,20 +3005,22 @@ class Team:
2772
3005
  # Update the TeamRunOutput metrics
2773
3006
  run_response.metrics = self._calculate_metrics(messages_for_run_response)
2774
3007
 
2775
- if stream_intermediate_steps and reasoning_state["reasoning_started"]:
3008
+ if stream_events and reasoning_state["reasoning_started"]:
2776
3009
  all_reasoning_steps: List[ReasoningStep] = []
2777
3010
  if run_response.reasoning_steps:
2778
3011
  all_reasoning_steps = cast(List[ReasoningStep], run_response.reasoning_steps)
2779
3012
 
2780
3013
  if all_reasoning_steps:
2781
3014
  add_reasoning_metrics_to_metadata(run_response, reasoning_state["reasoning_time_taken"])
2782
- yield self._handle_event(
3015
+ yield handle_event( # type: ignore
2783
3016
  create_team_reasoning_completed_event(
2784
3017
  from_run_response=run_response,
2785
3018
  content=ReasoningSteps(reasoning_steps=all_reasoning_steps),
2786
3019
  content_type=ReasoningSteps.__name__,
2787
3020
  ),
2788
3021
  run_response,
3022
+ events_to_skip=self.events_to_skip,
3023
+ store_events=self.store_events,
2789
3024
  )
2790
3025
 
2791
3026
  def _handle_model_response_chunk(
@@ -2795,7 +3030,7 @@ class Team:
2795
3030
  full_model_response: ModelResponse,
2796
3031
  model_response_event: Union[ModelResponse, TeamRunOutputEvent, RunOutputEvent],
2797
3032
  reasoning_state: Optional[Dict[str, Any]] = None,
2798
- stream_intermediate_steps: bool = False,
3033
+ stream_events: bool = False,
2799
3034
  parse_structured_output: bool = False,
2800
3035
  ) -> Iterator[Union[TeamRunOutputEvent, RunOutputEvent]]:
2801
3036
  if isinstance(model_response_event, tuple(get_args(RunOutputEvent))) or isinstance(
@@ -2812,7 +3047,12 @@ class Team:
2812
3047
  if not model_response_event.run_id: # type: ignore
2813
3048
  model_response_event.run_id = run_response.run_id # type: ignore
2814
3049
  # We just bubble the event up
2815
- yield self._handle_event(model_response_event, run_response) # type: ignore
3050
+ yield handle_event( # type: ignore
3051
+ model_response_event, # type: ignore
3052
+ run_response,
3053
+ events_to_skip=self.events_to_skip,
3054
+ store_events=self.store_events,
3055
+ ) # type: ignore
2816
3056
  else:
2817
3057
  # Don't yield anything
2818
3058
  return
@@ -2918,7 +3158,7 @@ class Team:
2918
3158
  # Only yield the chunk
2919
3159
  if should_yield:
2920
3160
  if content_type == "str":
2921
- yield self._handle_event(
3161
+ yield handle_event( # type: ignore
2922
3162
  create_team_run_output_content_event(
2923
3163
  from_run_response=run_response,
2924
3164
  content=model_response_event.content,
@@ -2930,15 +3170,19 @@ class Team:
2930
3170
  image=model_response_event.images[-1] if model_response_event.images else None,
2931
3171
  ),
2932
3172
  run_response,
3173
+ events_to_skip=self.events_to_skip,
3174
+ store_events=self.store_events,
2933
3175
  )
2934
3176
  else:
2935
- yield self._handle_event(
3177
+ yield handle_event( # type: ignore
2936
3178
  create_team_run_output_content_event(
2937
3179
  from_run_response=run_response,
2938
3180
  content=full_model_response.content,
2939
3181
  content_type=content_type,
2940
3182
  ),
2941
3183
  run_response,
3184
+ events_to_skip=self.events_to_skip,
3185
+ store_events=self.store_events,
2942
3186
  )
2943
3187
 
2944
3188
  # If the model response is a tool_call_started, add the tool call to the run_response
@@ -2953,12 +3197,14 @@ class Team:
2953
3197
  run_response.tools.extend(tool_executions_list)
2954
3198
 
2955
3199
  for tool in tool_executions_list:
2956
- yield self._handle_event(
3200
+ yield handle_event( # type: ignore
2957
3201
  create_team_tool_call_started_event(
2958
3202
  from_run_response=run_response,
2959
3203
  tool=tool,
2960
3204
  ),
2961
3205
  run_response,
3206
+ events_to_skip=self.events_to_skip,
3207
+ store_events=self.store_events,
2962
3208
  )
2963
3209
 
2964
3210
  # If the model response is a tool_call_completed, update the existing tool call in the run_response
@@ -3016,33 +3262,39 @@ class Team:
3016
3262
  "reasoning_time_taken"
3017
3263
  ] + float(metrics.duration)
3018
3264
 
3019
- yield self._handle_event(
3265
+ yield handle_event( # type: ignore
3020
3266
  create_team_tool_call_completed_event(
3021
3267
  from_run_response=run_response,
3022
3268
  tool=tool_call,
3023
3269
  content=model_response_event.content,
3024
3270
  ),
3025
3271
  run_response,
3272
+ events_to_skip=self.events_to_skip,
3273
+ store_events=self.store_events,
3026
3274
  )
3027
3275
 
3028
- if stream_intermediate_steps:
3276
+ if stream_events:
3029
3277
  if reasoning_step is not None:
3030
3278
  if reasoning_state is not None and not reasoning_state["reasoning_started"]:
3031
- yield self._handle_event(
3279
+ yield handle_event( # type: ignore
3032
3280
  create_team_reasoning_started_event(
3033
3281
  from_run_response=run_response,
3034
3282
  ),
3035
3283
  run_response,
3284
+ events_to_skip=self.events_to_skip,
3285
+ store_events=self.store_events,
3036
3286
  )
3037
3287
  reasoning_state["reasoning_started"] = True
3038
3288
 
3039
- yield self._handle_event(
3289
+ yield handle_event( # type: ignore
3040
3290
  create_team_reasoning_step_event(
3041
3291
  from_run_response=run_response,
3042
3292
  reasoning_step=reasoning_step,
3043
3293
  reasoning_content=run_response.reasoning_content or "",
3044
3294
  ),
3045
3295
  run_response,
3296
+ events_to_skip=self.events_to_skip,
3297
+ store_events=self.store_events,
3046
3298
  )
3047
3299
 
3048
3300
  def _convert_response_to_structured_format(self, run_response: Union[TeamRunOutput, RunOutput, ModelResponse]):
@@ -3083,97 +3335,71 @@ class Team:
3083
3335
  else:
3084
3336
  log_warning("Something went wrong. Member run response content is not a string")
3085
3337
 
3086
- def _make_memories_and_summaries(
3087
- self,
3088
- run_response: TeamRunOutput,
3089
- run_messages: RunMessages,
3090
- session: TeamSession,
3091
- user_id: Optional[str] = None,
3092
- ) -> Iterator[TeamRunOutputEvent]:
3093
- from concurrent.futures import ThreadPoolExecutor, as_completed
3338
+ def _cleanup_and_store(self, run_response: TeamRunOutput, session: TeamSession) -> None:
3339
+ # Scrub the stored run based on storage flags
3340
+ self._scrub_run_output_for_storage(run_response)
3094
3341
 
3095
- # Create a thread pool with a reasonable number of workers
3096
- with ThreadPoolExecutor(max_workers=3) as executor:
3097
- futures = []
3098
- user_message_str = (
3099
- run_messages.user_message.get_content_string() if run_messages.user_message is not None else None
3100
- )
3101
- # Create user memories
3102
- if user_message_str is not None and self.memory_manager is not None and not self.enable_agentic_memory:
3103
- futures.append(
3104
- executor.submit(
3105
- self.memory_manager.create_user_memories,
3106
- message=user_message_str,
3107
- user_id=user_id,
3108
- team_id=self.id,
3109
- )
3110
- )
3342
+ # Stop the timer for the Run duration
3343
+ if run_response.metrics:
3344
+ run_response.metrics.stop_timer()
3111
3345
 
3112
- # Create session summary
3113
- if self.session_summary_manager is not None:
3114
- log_debug("Creating session summary.")
3115
- futures.append(
3116
- executor.submit(
3117
- self.session_summary_manager.create_session_summary, # type: ignore
3118
- session=session,
3119
- )
3120
- )
3346
+ # Add RunOutput to Agent Session
3347
+ session.upsert_run(run_response=run_response)
3121
3348
 
3122
- if futures:
3123
- if self.stream_intermediate_steps:
3124
- yield self._handle_event(
3125
- create_team_memory_update_started_event(from_run_response=run_response), run_response
3126
- )
3349
+ # Calculate session metrics
3350
+ self._update_session_metrics(session=session)
3351
+
3352
+ # Save session to memory
3353
+ self.save_session(session=session)
3354
+
3355
+ async def _acleanup_and_store(self, run_response: TeamRunOutput, session: TeamSession) -> None:
3356
+ # Scrub the stored run based on storage flags
3357
+ self._scrub_run_output_for_storage(run_response)
3358
+
3359
+ # Stop the timer for the Run duration
3360
+ if run_response.metrics:
3361
+ run_response.metrics.stop_timer()
3362
+
3363
+ # Add RunOutput to Agent Session
3364
+ session.upsert_run(run_response=run_response)
3127
3365
 
3128
- # Wait for all operations to complete and handle any errors
3129
- for future in as_completed(futures):
3130
- try:
3131
- future.result()
3132
- except Exception as e:
3133
- log_warning(f"Error in memory/summary operation: {str(e)}")
3366
+ # Calculate session metrics
3367
+ self._update_session_metrics(session=session)
3134
3368
 
3135
- if self.stream_intermediate_steps:
3136
- yield self._handle_event(
3137
- create_team_memory_update_completed_event(from_run_response=run_response),
3138
- run_response,
3139
- )
3369
+ # Save session to memory
3370
+ self.save_session(session=session)
3140
3371
 
3141
- async def _amake_memories_and_summaries(
3372
+ def _make_memories(
3142
3373
  self,
3143
- run_response: TeamRunOutput,
3144
3374
  run_messages: RunMessages,
3145
- session: TeamSession,
3146
3375
  user_id: Optional[str] = None,
3147
- ) -> AsyncIterator[TeamRunOutputEvent]:
3148
- tasks: List[Coroutine] = []
3149
-
3376
+ ):
3150
3377
  user_message_str = (
3151
3378
  run_messages.user_message.get_content_string() if run_messages.user_message is not None else None
3152
3379
  )
3153
- if user_message_str is not None and self.memory_manager is not None and not self.enable_agentic_memory:
3154
- tasks.append(
3155
- self.memory_manager.acreate_user_memories(message=user_message_str, user_id=user_id, team_id=self.id)
3380
+ if user_message_str is not None and user_message_str.strip() != "" and self.memory_manager is not None:
3381
+ log_debug("Creating user memories.")
3382
+ self.memory_manager.create_user_memories(
3383
+ message=user_message_str,
3384
+ user_id=user_id,
3385
+ team_id=self.id,
3156
3386
  )
3157
3387
 
3158
- if self.session_summary_manager is not None:
3159
- tasks.append(self.session_summary_manager.acreate_session_summary(session=session))
3160
-
3161
- if tasks:
3162
- if self.stream_intermediate_steps:
3163
- yield self._handle_event(
3164
- create_team_memory_update_started_event(from_run_response=run_response), run_response
3165
- )
3166
-
3167
- # Execute all tasks concurrently and handle any errors
3168
- try:
3169
- await asyncio.gather(*tasks)
3170
- except Exception as e:
3171
- log_warning(f"Error in memory/summary operation: {str(e)}")
3172
-
3173
- if self.stream_intermediate_steps:
3174
- yield self._handle_event(
3175
- create_team_memory_update_completed_event(from_run_response=run_response), run_response
3176
- )
3388
+ async def _amake_memories(
3389
+ self,
3390
+ run_messages: RunMessages,
3391
+ user_id: Optional[str] = None,
3392
+ ):
3393
+ user_message_str = (
3394
+ run_messages.user_message.get_content_string() if run_messages.user_message is not None else None
3395
+ )
3396
+ if user_message_str is not None and user_message_str.strip() != "" and self.memory_manager is not None:
3397
+ log_debug("Creating user memories.")
3398
+ await self.memory_manager.acreate_user_memories(
3399
+ message=user_message_str,
3400
+ user_id=user_id,
3401
+ team_id=self.id,
3402
+ )
3177
3403
 
3178
3404
  def _get_response_format(self, model: Optional[Model] = None) -> Optional[Union[Dict, Type[BaseModel]]]:
3179
3405
  model = cast(Model, model or self.model)
@@ -3269,14 +3495,20 @@ class Team:
3269
3495
  log_warning("A response model is required to parse the response with a parser model")
3270
3496
 
3271
3497
  def _parse_response_with_parser_model_stream(
3272
- self, session: TeamSession, run_response: TeamRunOutput, stream_intermediate_steps: bool = True
3498
+ self,
3499
+ session: TeamSession,
3500
+ run_response: TeamRunOutput,
3501
+ stream_events: bool = False,
3273
3502
  ):
3274
3503
  """Parse the model response using the parser model"""
3275
3504
  if self.parser_model is not None:
3276
3505
  if self.output_schema is not None:
3277
- if stream_intermediate_steps:
3278
- yield self._handle_event(
3279
- create_team_parser_model_response_started_event(run_response), run_response
3506
+ if stream_events:
3507
+ yield handle_event( # type: ignore
3508
+ create_team_parser_model_response_started_event(run_response),
3509
+ run_response,
3510
+ events_to_skip=self.events_to_skip,
3511
+ store_events=self.store_events,
3280
3512
  )
3281
3513
 
3282
3514
  parser_model_response = ModelResponse(content="")
@@ -3295,7 +3527,7 @@ class Team:
3295
3527
  full_model_response=parser_model_response,
3296
3528
  model_response_event=model_response_event,
3297
3529
  parse_structured_output=True,
3298
- stream_intermediate_steps=stream_intermediate_steps,
3530
+ stream_events=stream_events,
3299
3531
  )
3300
3532
 
3301
3533
  run_response.content = parser_model_response.content
@@ -3311,23 +3543,29 @@ class Team:
3311
3543
  else:
3312
3544
  log_warning("Unable to parse response with parser model")
3313
3545
 
3314
- if stream_intermediate_steps:
3315
- yield self._handle_event(
3316
- create_team_parser_model_response_completed_event(run_response), run_response
3546
+ if stream_events:
3547
+ yield handle_event( # type: ignore
3548
+ create_team_parser_model_response_completed_event(run_response),
3549
+ run_response,
3550
+ events_to_skip=self.events_to_skip,
3551
+ store_events=self.store_events,
3317
3552
  )
3318
3553
 
3319
3554
  else:
3320
3555
  log_warning("A response model is required to parse the response with a parser model")
3321
3556
 
3322
3557
  async def _aparse_response_with_parser_model_stream(
3323
- self, session: TeamSession, run_response: TeamRunOutput, stream_intermediate_steps: bool = True
3558
+ self, session: TeamSession, run_response: TeamRunOutput, stream_events: bool = False
3324
3559
  ):
3325
3560
  """Parse the model response using the parser model stream."""
3326
3561
  if self.parser_model is not None:
3327
3562
  if self.output_schema is not None:
3328
- if stream_intermediate_steps:
3329
- yield self._handle_event(
3330
- create_team_parser_model_response_started_event(run_response), run_response
3563
+ if stream_events:
3564
+ yield handle_event( # type: ignore
3565
+ create_team_parser_model_response_started_event(run_response),
3566
+ run_response,
3567
+ events_to_skip=self.events_to_skip,
3568
+ store_events=self.store_events,
3331
3569
  )
3332
3570
 
3333
3571
  parser_model_response = ModelResponse(content="")
@@ -3347,7 +3585,7 @@ class Team:
3347
3585
  full_model_response=parser_model_response,
3348
3586
  model_response_event=model_response_event,
3349
3587
  parse_structured_output=True,
3350
- stream_intermediate_steps=stream_intermediate_steps,
3588
+ stream_events=stream_events,
3351
3589
  ):
3352
3590
  yield event
3353
3591
 
@@ -3364,9 +3602,12 @@ class Team:
3364
3602
  else:
3365
3603
  log_warning("Unable to parse response with parser model")
3366
3604
 
3367
- if stream_intermediate_steps:
3368
- yield self._handle_event(
3369
- create_team_parser_model_response_completed_event(run_response), run_response
3605
+ if stream_events:
3606
+ yield handle_event( # type: ignore
3607
+ create_team_parser_model_response_completed_event(run_response),
3608
+ run_response,
3609
+ events_to_skip=self.events_to_skip,
3610
+ store_events=self.store_events,
3370
3611
  )
3371
3612
  else:
3372
3613
  log_warning("A response model is required to parse the response with a parser model")
@@ -3385,7 +3626,7 @@ class Team:
3385
3626
  session: TeamSession,
3386
3627
  run_response: TeamRunOutput,
3387
3628
  run_messages: RunMessages,
3388
- stream_intermediate_steps: bool = False,
3629
+ stream_events: bool = False,
3389
3630
  ):
3390
3631
  """Parse the model response using the output model stream."""
3391
3632
  from agno.utils.events import (
@@ -3396,8 +3637,13 @@ class Team:
3396
3637
  if self.output_model is None:
3397
3638
  return
3398
3639
 
3399
- if stream_intermediate_steps:
3400
- yield self._handle_event(create_team_output_model_response_started_event(run_response), run_response)
3640
+ if stream_events:
3641
+ yield handle_event( # type: ignore
3642
+ create_team_output_model_response_started_event(run_response),
3643
+ run_response,
3644
+ events_to_skip=self.events_to_skip,
3645
+ store_events=self.store_events,
3646
+ )
3401
3647
 
3402
3648
  messages_for_output_model = self._get_messages_for_output_model(run_messages.messages)
3403
3649
  model_response = ModelResponse(content="")
@@ -3413,8 +3659,13 @@ class Team:
3413
3659
  # Update the TeamRunResponse content
3414
3660
  run_response.content = model_response.content
3415
3661
 
3416
- if stream_intermediate_steps:
3417
- yield self._handle_event(create_team_output_model_response_completed_event(run_response), run_response)
3662
+ if stream_events:
3663
+ yield handle_event( # type: ignore
3664
+ create_team_output_model_response_completed_event(run_response),
3665
+ run_response,
3666
+ events_to_skip=self.events_to_skip,
3667
+ store_events=self.store_events,
3668
+ )
3418
3669
 
3419
3670
  # Build a list of messages that should be added to the RunResponse
3420
3671
  messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
@@ -3439,7 +3690,7 @@ class Team:
3439
3690
  session: TeamSession,
3440
3691
  run_response: TeamRunOutput,
3441
3692
  run_messages: RunMessages,
3442
- stream_intermediate_steps: bool = False,
3693
+ stream_events: bool = False,
3443
3694
  ):
3444
3695
  """Parse the model response using the output model stream."""
3445
3696
  from agno.utils.events import (
@@ -3450,8 +3701,13 @@ class Team:
3450
3701
  if self.output_model is None:
3451
3702
  return
3452
3703
 
3453
- if stream_intermediate_steps:
3454
- yield self._handle_event(create_team_output_model_response_started_event(run_response), run_response)
3704
+ if stream_events:
3705
+ yield handle_event( # type: ignore
3706
+ create_team_output_model_response_started_event(run_response),
3707
+ run_response,
3708
+ events_to_skip=self.events_to_skip,
3709
+ store_events=self.store_events,
3710
+ )
3455
3711
 
3456
3712
  messages_for_output_model = self._get_messages_for_output_model(run_messages.messages)
3457
3713
  model_response = ModelResponse(content="")
@@ -3468,8 +3724,13 @@ class Team:
3468
3724
  # Update the TeamRunResponse content
3469
3725
  run_response.content = model_response.content
3470
3726
 
3471
- if stream_intermediate_steps:
3472
- yield self._handle_event(create_team_output_model_response_completed_event(run_response), run_response)
3727
+ if stream_events:
3728
+ yield handle_event( # type: ignore
3729
+ create_team_output_model_response_completed_event(run_response),
3730
+ run_response,
3731
+ events_to_skip=self.events_to_skip,
3732
+ store_events=self.store_events,
3733
+ )
3473
3734
 
3474
3735
  # Build a list of messages that should be added to the RunResponse
3475
3736
  messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
@@ -3500,6 +3761,7 @@ class Team:
3500
3761
  input: Union[List, Dict, str, Message, BaseModel, List[Message]],
3501
3762
  *,
3502
3763
  stream: Optional[bool] = None,
3764
+ stream_events: Optional[bool] = None,
3503
3765
  stream_intermediate_steps: Optional[bool] = None,
3504
3766
  session_id: Optional[str] = None,
3505
3767
  session_state: Optional[Dict[str, Any]] = None,
@@ -3540,8 +3802,15 @@ class Team:
3540
3802
  if stream is None:
3541
3803
  stream = self.stream or False
3542
3804
 
3543
- if stream_intermediate_steps is None:
3544
- stream_intermediate_steps = self.stream_intermediate_steps or False
3805
+ # Considering both stream_events and stream_intermediate_steps (deprecated)
3806
+ stream_events = stream_events or stream_intermediate_steps
3807
+
3808
+ # Can't stream events if streaming is disabled
3809
+ if stream is False:
3810
+ stream_events = False
3811
+
3812
+ if stream_events is None:
3813
+ stream_events = False if self.stream_events is None else self.stream_events
3545
3814
 
3546
3815
  if stream:
3547
3816
  print_response_stream(
@@ -3560,7 +3829,7 @@ class Team:
3560
3829
  videos=videos,
3561
3830
  files=files,
3562
3831
  markdown=markdown,
3563
- stream_intermediate_steps=stream_intermediate_steps,
3832
+ stream_events=stream_events,
3564
3833
  knowledge_filters=knowledge_filters,
3565
3834
  add_history_to_context=add_history_to_context,
3566
3835
  dependencies=dependencies,
@@ -3602,6 +3871,7 @@ class Team:
3602
3871
  input: Union[List, Dict, str, Message, BaseModel, List[Message]],
3603
3872
  *,
3604
3873
  stream: Optional[bool] = None,
3874
+ stream_events: Optional[bool] = None,
3605
3875
  stream_intermediate_steps: Optional[bool] = None,
3606
3876
  session_id: Optional[str] = None,
3607
3877
  session_state: Optional[Dict[str, Any]] = None,
@@ -3637,8 +3907,15 @@ class Team:
3637
3907
  if stream is None:
3638
3908
  stream = self.stream or False
3639
3909
 
3640
- if stream_intermediate_steps is None:
3641
- stream_intermediate_steps = self.stream_intermediate_steps or False
3910
+ # Considering both stream_events and stream_intermediate_steps (deprecated)
3911
+ stream_events = stream_events or stream_intermediate_steps
3912
+
3913
+ # Can't stream events if streaming is disabled
3914
+ if stream is False:
3915
+ stream_events = False
3916
+
3917
+ if stream_events is None:
3918
+ stream_events = False if self.stream_events is None else self.stream_events
3642
3919
 
3643
3920
  if stream:
3644
3921
  await aprint_response_stream(
@@ -3657,7 +3934,7 @@ class Team:
3657
3934
  videos=videos,
3658
3935
  files=files,
3659
3936
  markdown=markdown,
3660
- stream_intermediate_steps=stream_intermediate_steps,
3937
+ stream_events=stream_events,
3661
3938
  knowledge_filters=knowledge_filters,
3662
3939
  add_history_to_context=add_history_to_context,
3663
3940
  dependencies=dependencies,
@@ -3704,88 +3981,6 @@ class Team:
3704
3981
  return member.name or entity_id
3705
3982
  return entity_id
3706
3983
 
3707
- def _scrub_media_from_run_output(self, run_response: TeamRunOutput) -> None:
3708
- """
3709
- Completely remove all media from RunOutput when store_media=False.
3710
- This includes media in input, output artifacts, and all messages.
3711
- """
3712
- # 1. Scrub RunInput media
3713
- if run_response.input is not None:
3714
- run_response.input.images = []
3715
- run_response.input.videos = []
3716
- run_response.input.audios = []
3717
- run_response.input.files = []
3718
-
3719
- # 3. Scrub media from all messages
3720
- if run_response.messages:
3721
- for message in run_response.messages:
3722
- self._scrub_media_from_message(message)
3723
-
3724
- # 4. Scrub media from additional_input messages if any
3725
- if run_response.additional_input:
3726
- for message in run_response.additional_input:
3727
- self._scrub_media_from_message(message)
3728
-
3729
- # 5. Scrub media from reasoning_messages if any
3730
- if run_response.reasoning_messages:
3731
- for message in run_response.reasoning_messages:
3732
- self._scrub_media_from_message(message)
3733
-
3734
- def _scrub_media_from_message(self, message: Message) -> None:
3735
- """Remove all media from a Message object."""
3736
- # Input media
3737
- message.images = None
3738
- message.videos = None
3739
- message.audio = None
3740
- message.files = None
3741
-
3742
- # Output media
3743
- message.audio_output = None
3744
- message.image_output = None
3745
- message.video_output = None
3746
-
3747
- def _scrub_tool_results_from_run_output(self, run_response: TeamRunOutput) -> None:
3748
- """
3749
- Remove all tool-related data from RunOutput when store_tool_messages=False.
3750
- This removes both the tool call and its corresponding result to maintain API consistency.
3751
- """
3752
- if not run_response.messages:
3753
- return
3754
-
3755
- # Step 1: Collect all tool_call_ids from tool result messages
3756
- tool_call_ids_to_remove = set()
3757
- for message in run_response.messages:
3758
- if message.role == "tool" and message.tool_call_id:
3759
- tool_call_ids_to_remove.add(message.tool_call_id)
3760
-
3761
- # Step 2: Remove tool result messages (role="tool")
3762
- run_response.messages = [msg for msg in run_response.messages if msg.role != "tool"]
3763
-
3764
- # Step 3: Remove assistant messages that made those tool calls
3765
- filtered_messages = []
3766
- for message in run_response.messages:
3767
- # Check if this assistant message made any of the tool calls we're removing
3768
- should_remove = False
3769
- if message.role == "assistant" and message.tool_calls:
3770
- for tool_call in message.tool_calls:
3771
- if tool_call.get("id") in tool_call_ids_to_remove:
3772
- should_remove = True
3773
- break
3774
-
3775
- if not should_remove:
3776
- filtered_messages.append(message)
3777
-
3778
- run_response.messages = filtered_messages
3779
-
3780
- def _scrub_history_messages_from_run_output(self, run_response: TeamRunOutput) -> None:
3781
- """
3782
- Remove all history messages from TeamRunOutput when store_history_messages=False.
3783
- This removes messages that were loaded from the team's memory.
3784
- """
3785
- # Remove messages with from_history=True
3786
- if run_response.messages:
3787
- run_response.messages = [msg for msg in run_response.messages if not msg.from_history]
3788
-
3789
3984
  def _scrub_run_output_for_storage(self, run_response: TeamRunOutput) -> bool:
3790
3985
  """
3791
3986
  Scrub run output based on storage flags before persisting to database.
@@ -3794,15 +3989,15 @@ class Team:
3794
3989
  scrubbed = False
3795
3990
 
3796
3991
  if not self.store_media:
3797
- self._scrub_media_from_run_output(run_response)
3992
+ scrub_media_from_run_output(run_response)
3798
3993
  scrubbed = True
3799
3994
 
3800
3995
  if not self.store_tool_messages:
3801
- self._scrub_tool_results_from_run_output(run_response)
3996
+ scrub_tool_results_from_run_output(run_response)
3802
3997
  scrubbed = True
3803
3998
 
3804
3999
  if not self.store_history_messages:
3805
- self._scrub_history_messages_from_run_output(run_response)
4000
+ scrub_history_messages_from_run_output(run_response)
3806
4001
  scrubbed = True
3807
4002
 
3808
4003
  return scrubbed
@@ -3959,32 +4154,42 @@ class Team:
3959
4154
  # Helpers
3960
4155
  ###########################################################################
3961
4156
 
3962
- def _handle_reasoning(self, run_response: TeamRunOutput, run_messages: RunMessages) -> None:
4157
+ def _handle_reasoning(self, run_response: TeamRunOutput, run_messages: RunMessages):
3963
4158
  if self.reasoning or self.reasoning_model is not None:
3964
- reasoning_generator = self._reason(run_response=run_response, run_messages=run_messages)
4159
+ reasoning_generator = self._reason(
4160
+ run_response=run_response, run_messages=run_messages, stream_events=False
4161
+ )
3965
4162
 
3966
4163
  # Consume the generator without yielding
3967
4164
  deque(reasoning_generator, maxlen=0)
3968
4165
 
3969
4166
  def _handle_reasoning_stream(
3970
- self, run_response: TeamRunOutput, run_messages: RunMessages
4167
+ self, run_response: TeamRunOutput, run_messages: RunMessages, stream_events: bool
3971
4168
  ) -> Iterator[TeamRunOutputEvent]:
3972
4169
  if self.reasoning or self.reasoning_model is not None:
3973
- reasoning_generator = self._reason(run_response=run_response, run_messages=run_messages)
4170
+ reasoning_generator = self._reason(
4171
+ run_response=run_response,
4172
+ run_messages=run_messages,
4173
+ stream_events=stream_events,
4174
+ )
3974
4175
  yield from reasoning_generator
3975
4176
 
3976
4177
  async def _ahandle_reasoning(self, run_response: TeamRunOutput, run_messages: RunMessages) -> None:
3977
4178
  if self.reasoning or self.reasoning_model is not None:
3978
- reason_generator = self._areason(run_response=run_response, run_messages=run_messages)
4179
+ reason_generator = self._areason(run_response=run_response, run_messages=run_messages, stream_events=False)
3979
4180
  # Consume the generator without yielding
3980
4181
  async for _ in reason_generator:
3981
4182
  pass
3982
4183
 
3983
4184
  async def _ahandle_reasoning_stream(
3984
- self, run_response: TeamRunOutput, run_messages: RunMessages
4185
+ self, run_response: TeamRunOutput, run_messages: RunMessages, stream_events: bool
3985
4186
  ) -> AsyncIterator[TeamRunOutputEvent]:
3986
4187
  if self.reasoning or self.reasoning_model is not None:
3987
- reason_generator = self._areason(run_response=run_response, run_messages=run_messages)
4188
+ reason_generator = self._areason(
4189
+ run_response=run_response,
4190
+ run_messages=run_messages,
4191
+ stream_events=stream_events,
4192
+ )
3988
4193
  async for item in reason_generator:
3989
4194
  yield item
3990
4195
 
@@ -4062,9 +4267,15 @@ class Team:
4062
4267
  self,
4063
4268
  run_response: TeamRunOutput,
4064
4269
  run_messages: RunMessages,
4270
+ stream_events: bool,
4065
4271
  ) -> Iterator[TeamRunOutputEvent]:
4066
- if self.stream_intermediate_steps:
4067
- yield self._handle_event(create_team_reasoning_started_event(from_run_response=run_response), run_response)
4272
+ if stream_events:
4273
+ yield handle_event( # type: ignore
4274
+ create_team_reasoning_started_event(from_run_response=run_response),
4275
+ run_response,
4276
+ events_to_skip=self.events_to_skip,
4277
+ store_events=self.store_events,
4278
+ )
4068
4279
 
4069
4280
  use_default_reasoning = False
4070
4281
 
@@ -4185,14 +4396,16 @@ class Team:
4185
4396
  reasoning_steps=[ReasoningStep(result=reasoning_message.content)],
4186
4397
  reasoning_agent_messages=[reasoning_message],
4187
4398
  )
4188
- if self.stream_intermediate_steps:
4189
- yield self._handle_event(
4399
+ if stream_events:
4400
+ yield handle_event( # type: ignore
4190
4401
  create_team_reasoning_completed_event(
4191
4402
  from_run_response=run_response,
4192
4403
  content=ReasoningSteps(reasoning_steps=[ReasoningStep(result=reasoning_message.content)]),
4193
4404
  content_type=ReasoningSteps.__name__,
4194
4405
  ),
4195
4406
  run_response,
4407
+ events_to_skip=self.events_to_skip,
4408
+ store_events=self.store_events,
4196
4409
  )
4197
4410
  else:
4198
4411
  log_warning(
@@ -4270,19 +4483,21 @@ class Team:
4270
4483
  reasoning_steps: List[ReasoningStep] = reasoning_agent_response.content.reasoning_steps
4271
4484
  all_reasoning_steps.extend(reasoning_steps)
4272
4485
  # Yield reasoning steps
4273
- if self.stream_intermediate_steps:
4486
+ if stream_events:
4274
4487
  for reasoning_step in reasoning_steps:
4275
4488
  updated_reasoning_content = self._format_reasoning_step_content(
4276
4489
  run_response, reasoning_step
4277
4490
  )
4278
4491
 
4279
- yield self._handle_event(
4492
+ yield handle_event( # type: ignore
4280
4493
  create_team_reasoning_step_event(
4281
4494
  from_run_response=run_response,
4282
4495
  reasoning_step=reasoning_step,
4283
4496
  reasoning_content=updated_reasoning_content,
4284
4497
  ),
4285
4498
  run_response,
4499
+ events_to_skip=self.events_to_skip,
4500
+ store_events=self.store_events,
4286
4501
  )
4287
4502
 
4288
4503
  # Find the index of the first assistant message
@@ -4318,23 +4533,31 @@ class Team:
4318
4533
  )
4319
4534
 
4320
4535
  # Yield the final reasoning completed event
4321
- if self.stream_intermediate_steps:
4322
- yield self._handle_event(
4536
+ if stream_events:
4537
+ yield handle_event( # type: ignore
4323
4538
  create_team_reasoning_completed_event(
4324
4539
  from_run_response=run_response,
4325
4540
  content=ReasoningSteps(reasoning_steps=all_reasoning_steps),
4326
4541
  content_type=ReasoningSteps.__name__,
4327
4542
  ),
4328
4543
  run_response,
4544
+ events_to_skip=self.events_to_skip,
4545
+ store_events=self.store_events,
4329
4546
  )
4330
4547
 
4331
4548
  async def _areason(
4332
4549
  self,
4333
4550
  run_response: TeamRunOutput,
4334
4551
  run_messages: RunMessages,
4552
+ stream_events: bool,
4335
4553
  ) -> AsyncIterator[TeamRunOutputEvent]:
4336
- if self.stream_intermediate_steps:
4337
- yield self._handle_event(create_team_reasoning_started_event(from_run_response=run_response), run_response)
4554
+ if stream_events:
4555
+ yield handle_event( # type: ignore
4556
+ create_team_reasoning_started_event(from_run_response=run_response),
4557
+ run_response,
4558
+ events_to_skip=self.events_to_skip,
4559
+ store_events=self.store_events,
4560
+ )
4338
4561
 
4339
4562
  use_default_reasoning = False
4340
4563
 
@@ -4454,14 +4677,16 @@ class Team:
4454
4677
  reasoning_steps=[ReasoningStep(result=reasoning_message.content)],
4455
4678
  reasoning_agent_messages=[reasoning_message],
4456
4679
  )
4457
- if self.stream_intermediate_steps:
4458
- yield self._handle_event(
4680
+ if stream_events:
4681
+ yield handle_event( # type: ignore
4459
4682
  create_team_reasoning_completed_event(
4460
4683
  from_run_response=run_response,
4461
4684
  content=ReasoningSteps(reasoning_steps=[ReasoningStep(result=reasoning_message.content)]),
4462
4685
  content_type=ReasoningSteps.__name__,
4463
4686
  ),
4464
4687
  run_response,
4688
+ events_to_skip=self.events_to_skip,
4689
+ store_events=self.store_events,
4465
4690
  )
4466
4691
  else:
4467
4692
  log_warning(
@@ -4538,19 +4763,21 @@ class Team:
4538
4763
  reasoning_steps: List[ReasoningStep] = reasoning_agent_response.content.reasoning_steps
4539
4764
  all_reasoning_steps.extend(reasoning_steps)
4540
4765
  # Yield reasoning steps
4541
- if self.stream_intermediate_steps:
4766
+ if stream_events:
4542
4767
  for reasoning_step in reasoning_steps:
4543
4768
  updated_reasoning_content = self._format_reasoning_step_content(
4544
4769
  run_response, reasoning_step
4545
4770
  )
4546
4771
 
4547
- yield self._handle_event(
4772
+ yield handle_event( # type: ignore
4548
4773
  create_team_reasoning_step_event(
4549
4774
  from_run_response=run_response,
4550
4775
  reasoning_step=reasoning_step,
4551
4776
  reasoning_content=updated_reasoning_content,
4552
4777
  ),
4553
4778
  run_response,
4779
+ events_to_skip=self.events_to_skip,
4780
+ store_events=self.store_events,
4554
4781
  )
4555
4782
 
4556
4783
  # Find the index of the first assistant message
@@ -4586,14 +4813,16 @@ class Team:
4586
4813
  )
4587
4814
 
4588
4815
  # Yield the final reasoning completed event
4589
- if self.stream_intermediate_steps:
4590
- yield self._handle_event(
4816
+ if stream_events:
4817
+ yield handle_event( # type: ignore # type: ignore
4591
4818
  create_team_reasoning_completed_event(
4592
4819
  from_run_response=run_response,
4593
4820
  content=ReasoningSteps(reasoning_steps=all_reasoning_steps),
4594
4821
  content_type=ReasoningSteps.__name__,
4595
4822
  ),
4596
4823
  run_response,
4824
+ events_to_skip=self.events_to_skip,
4825
+ store_events=self.store_events,
4597
4826
  )
4598
4827
 
4599
4828
  def _resolve_run_dependencies(self, dependencies: Optional[Dict[str, Any]] = None) -> None:
@@ -4641,137 +4870,6 @@ class Team:
4641
4870
  except Exception as e:
4642
4871
  log_warning(f"Failed to resolve context for '{key}': {e}")
4643
4872
 
4644
- def _collect_joint_images(
4645
- self,
4646
- run_input: Optional[TeamRunInput] = None,
4647
- session: Optional[TeamSession] = None,
4648
- ) -> Optional[Sequence[Image]]:
4649
- """Collect images from input, session history, and current run response."""
4650
- joint_images: List[Image] = []
4651
-
4652
- # 1. Add images from current input
4653
- if run_input and run_input.images:
4654
- joint_images.extend(run_input.images)
4655
- log_debug(f"Added {len(run_input.images)} input images to joint list")
4656
-
4657
- # 2. Add images from session history (from both input and generated sources)
4658
- try:
4659
- if session and session.runs:
4660
- for historical_run in session.runs:
4661
- # Add generated images from previous runs
4662
- if historical_run.images:
4663
- joint_images.extend(historical_run.images)
4664
- log_debug(
4665
- f"Added {len(historical_run.images)} generated images from historical run {historical_run.run_id}"
4666
- )
4667
-
4668
- # Add input images from previous runs
4669
- if historical_run.input and historical_run.input.images:
4670
- joint_images.extend(historical_run.input.images)
4671
- log_debug(
4672
- f"Added {len(historical_run.input.images)} input images from historical run {historical_run.run_id}"
4673
- )
4674
- except Exception as e:
4675
- log_debug(f"Could not access session history for images: {e}")
4676
-
4677
- if joint_images:
4678
- log_debug(f"Images Available to Model: {len(joint_images)} images")
4679
- return joint_images if joint_images else None
4680
-
4681
- def _collect_joint_videos(
4682
- self,
4683
- run_input: Optional[TeamRunInput] = None,
4684
- session: Optional[TeamSession] = None,
4685
- ) -> Optional[Sequence[Video]]:
4686
- """Collect videos from input, session history, and current run response."""
4687
- joint_videos: List[Video] = []
4688
-
4689
- # 1. Add videos from current input
4690
- if run_input and run_input.videos:
4691
- joint_videos.extend(run_input.videos)
4692
- log_debug(f"Added {len(run_input.videos)} input videos to joint list")
4693
-
4694
- # 2. Add videos from session history (from both input and generated sources)
4695
- try:
4696
- if session and session.runs:
4697
- for historical_run in session.runs:
4698
- # Add generated videos from previous runs
4699
- if historical_run.videos:
4700
- joint_videos.extend(historical_run.videos)
4701
- log_debug(
4702
- f"Added {len(historical_run.videos)} generated videos from historical run {historical_run.run_id}"
4703
- )
4704
-
4705
- # Add input videos from previous runs
4706
- if historical_run.input and historical_run.input.videos:
4707
- joint_videos.extend(historical_run.input.videos)
4708
- log_debug(
4709
- f"Added {len(historical_run.input.videos)} input videos from historical run {historical_run.run_id}"
4710
- )
4711
- except Exception as e:
4712
- log_debug(f"Could not access session history for videos: {e}")
4713
-
4714
- if joint_videos:
4715
- log_debug(f"Videos Available to Model: {len(joint_videos)} videos")
4716
- return joint_videos if joint_videos else None
4717
-
4718
- def _collect_joint_audios(
4719
- self,
4720
- run_input: Optional[TeamRunInput] = None,
4721
- session: Optional[TeamSession] = None,
4722
- ) -> Optional[Sequence[Audio]]:
4723
- """Collect audios from input, session history, and current run response."""
4724
- joint_audios: List[Audio] = []
4725
-
4726
- # 1. Add audios from current input
4727
- if run_input and run_input.audios:
4728
- joint_audios.extend(run_input.audios)
4729
- log_debug(f"Added {len(run_input.audios)} input audios to joint list")
4730
-
4731
- # 2. Add audios from session history (from both input and generated sources)
4732
- try:
4733
- if session and session.runs:
4734
- for historical_run in session.runs:
4735
- # Add generated audios from previous runs
4736
- if historical_run.audio:
4737
- joint_audios.extend(historical_run.audio)
4738
- log_debug(
4739
- f"Added {len(historical_run.audio)} generated audios from historical run {historical_run.run_id}"
4740
- )
4741
-
4742
- # Add input audios from previous runs
4743
- if historical_run.input and historical_run.input.audios:
4744
- joint_audios.extend(historical_run.input.audios)
4745
- log_debug(
4746
- f"Added {len(historical_run.input.audios)} input audios from historical run {historical_run.run_id}"
4747
- )
4748
- except Exception as e:
4749
- log_debug(f"Could not access session history for audios: {e}")
4750
-
4751
- if joint_audios:
4752
- log_debug(f"Audios Available to Model: {len(joint_audios)} audios")
4753
- return joint_audios if joint_audios else None
4754
-
4755
- def _collect_joint_files(
4756
- self,
4757
- run_input: Optional[TeamRunInput] = None,
4758
- ) -> Optional[Sequence[File]]:
4759
- """Collect files from input and session history."""
4760
- from agno.utils.log import log_debug
4761
-
4762
- joint_files: List[File] = []
4763
-
4764
- # 1. Add files from current input
4765
- if run_input and run_input.files:
4766
- joint_files.extend(run_input.files)
4767
-
4768
- # TODO: Files aren't stored in session history yet and dont have a FileArtifact
4769
-
4770
- if joint_files:
4771
- log_debug(f"Files Available to Model: {len(joint_files)} files")
4772
-
4773
- return joint_files if joint_files else None
4774
-
4775
4873
  def determine_tools_for_model(
4776
4874
  self,
4777
4875
  model: Model,
@@ -4802,14 +4900,14 @@ class Team:
4802
4900
  for tool in self.tools:
4803
4901
  _tools.append(tool)
4804
4902
 
4805
- if self.read_team_history:
4806
- _tools.append(self._get_team_history_function(session=session))
4903
+ if self.read_chat_history:
4904
+ _tools.append(self._get_chat_history_function(session=session, async_mode=async_mode))
4807
4905
 
4808
4906
  if self.memory_manager is not None and self.enable_agentic_memory:
4809
4907
  _tools.append(self._get_update_user_memory_function(user_id=user_id, async_mode=async_mode))
4810
4908
 
4811
4909
  if self.enable_agentic_state:
4812
- _tools.append(self.update_session_state)
4910
+ _tools.append(Function(name="update_session_state", entrypoint=self._update_session_state_tool))
4813
4911
 
4814
4912
  if self.search_session_history:
4815
4913
  _tools.append(
@@ -4848,7 +4946,7 @@ class Team:
4848
4946
 
4849
4947
  if self.members:
4850
4948
  # Get the user message if we are using the input directly
4851
- user_message = None
4949
+ user_message_content = None
4852
4950
  if self.determine_input_for_members is False:
4853
4951
  user_message = self._get_user_message(
4854
4952
  run_response=run_response,
@@ -4863,16 +4961,17 @@ class Team:
4863
4961
  add_dependencies_to_context=add_dependencies_to_context,
4864
4962
  metadata=metadata,
4865
4963
  )
4964
+ user_message_content = user_message.content if user_message is not None else None
4866
4965
 
4867
4966
  delegate_task_func = self._get_delegate_task_function(
4868
4967
  run_response=run_response,
4869
4968
  session=session,
4870
4969
  session_state=session_state,
4871
4970
  team_run_context=team_run_context,
4872
- input=user_message,
4971
+ input=user_message_content,
4873
4972
  user_id=user_id,
4874
4973
  stream=self.stream or False,
4875
- stream_intermediate_steps=self.stream_intermediate_steps,
4974
+ stream_events=self.stream_events or False,
4876
4975
  async_mode=async_mode,
4877
4976
  images=images, # type: ignore
4878
4977
  videos=videos, # type: ignore
@@ -4983,10 +5082,10 @@ class Team:
4983
5082
 
4984
5083
  if needs_media:
4985
5084
  # Only collect media if functions actually need them
4986
- joint_images = self._collect_joint_images(run_response.input, session)
4987
- joint_files = self._collect_joint_files(run_response.input)
4988
- joint_audios = self._collect_joint_audios(run_response.input, session)
4989
- joint_videos = self._collect_joint_videos(run_response.input, session)
5085
+ joint_images = collect_joint_images(run_response.input, session) # type: ignore
5086
+ joint_files = collect_joint_files(run_response.input) # type: ignore
5087
+ joint_audios = collect_joint_audios(run_response.input, session) # type: ignore
5088
+ joint_videos = collect_joint_videos(run_response.input, session) # type: ignore
4990
5089
 
4991
5090
  for func in self._functions_for_model.values():
4992
5091
  func._images = joint_images
@@ -5183,12 +5282,11 @@ class Team:
5183
5282
  "- You cannot use a member tool directly. You can only delegate tasks to members.\n"
5184
5283
  "- When you delegate a task to another member, make sure to include:\n"
5185
5284
  " - member_id (str): The ID of the member to delegate the task to. Use only the ID of the member, not the ID of the team followed by the ID of the member.\n"
5186
- " - task_description (str): A clear description of the task.\n"
5187
- " - expected_output (str): The expected output.\n"
5285
+ " - task (str): A clear description of the task. Determine the best way to describe the task to the member.\n"
5188
5286
  "- You can delegate tasks to multiple members at once.\n"
5189
5287
  "- You must always analyze the responses from members before responding to the user.\n"
5190
5288
  "- After analyzing the responses from the members, if you feel the task has been completed, you can stop and respond to the user.\n"
5191
- "- If you are not satisfied with the responses from the members, you should re-assign the task.\n"
5289
+ "- If you are NOT satisfied with the responses from the members, you should re-assign the task to a different member.\n"
5192
5290
  "- For simple greetings, thanks, or questions about the team itself, you should respond directly.\n"
5193
5291
  "- For all work requests, tasks, or questions requiring expertise, route to appropriate team members.\n"
5194
5292
  )
@@ -5477,8 +5575,7 @@ class Team:
5477
5575
  "- You cannot use a member tool directly. You can only delegate tasks to members.\n"
5478
5576
  "- When you delegate a task to another member, make sure to include:\n"
5479
5577
  " - member_id (str): The ID of the member to delegate the task to. Use only the ID of the member, not the ID of the team followed by the ID of the member.\n"
5480
- " - task_description (str): A clear description of the task.\n"
5481
- " - expected_output (str): The expected output.\n"
5578
+ " - task (str): A clear description of the task.\n"
5482
5579
  "- You can delegate tasks to multiple members at once.\n"
5483
5580
  "- You must always analyze the responses from members before responding to the user.\n"
5484
5581
  "- After analyzing the responses from the members, if you feel the task has been completed, you can stop and respond to the user.\n"
@@ -5733,7 +5830,6 @@ class Team:
5733
5830
  run_messages.messages += history_copy
5734
5831
 
5735
5832
  # 5. Add user message to run_messages (message second as per Dirk's requirement)
5736
- user_message: Optional[Message] = None
5737
5833
  # 5.1 Build user message if message is None, str or list
5738
5834
  user_message = self._get_user_message(
5739
5835
  run_response=run_response,
@@ -5861,7 +5957,6 @@ class Team:
5861
5957
  run_messages.messages += history_copy
5862
5958
 
5863
5959
  # 5. Add user message to run_messages (message second as per Dirk's requirement)
5864
- user_message: Optional[Message] = None
5865
5960
  # 5.1 Build user message if message is None, str or list
5866
5961
  user_message = self._get_user_message(
5867
5962
  run_response=run_response,
@@ -6305,10 +6400,15 @@ class Team:
6305
6400
  """Get information about the members of the team, including their IDs, names, and roles."""
6306
6401
  return self.get_members_system_message_content(indent=0)
6307
6402
 
6308
- def _get_team_history_function(self, session: TeamSession) -> Callable:
6309
- def get_team_history(num_chats: Optional[int] = None) -> str:
6403
+ def _get_chat_history_function(self, session: TeamSession, async_mode: bool = False):
6404
+ def get_chat_history(num_chats: Optional[int] = None) -> str:
6310
6405
  """
6311
- Use this function to get the team chat history.
6406
+ Use this function to get the team chat history in reverse chronological order.
6407
+ Leave the num_chats parameter blank to get the entire chat history.
6408
+ Example:
6409
+ - To get the last chat, use num_chats=1
6410
+ - To get the last 5 chats, use num_chats=5
6411
+ - To get all chats, leave num_chats blank
6312
6412
 
6313
6413
  Args:
6314
6414
  num_chats: The number of chats to return.
@@ -6317,12 +6417,42 @@ class Team:
6317
6417
 
6318
6418
  Returns:
6319
6419
  str: A JSON string containing a list of dictionaries representing the team chat history.
6420
+ """
6421
+ import json
6422
+
6423
+ history: List[Dict[str, Any]] = []
6424
+
6425
+ all_chats = session.get_messages_from_last_n_runs(
6426
+ team_id=self.id,
6427
+ )
6428
+
6429
+ if len(all_chats) == 0:
6430
+ return ""
6431
+
6432
+ for chat in all_chats[::-1]: # type: ignore
6433
+ history.insert(0, chat.to_dict()) # type: ignore
6434
+
6435
+ if num_chats is not None:
6436
+ history = history[:num_chats]
6437
+
6438
+ return json.dumps(history)
6320
6439
 
6440
+ async def aget_chat_history(num_chats: Optional[int] = None) -> str:
6441
+ """
6442
+ Use this function to get the team chat history in reverse chronological order.
6443
+ Leave the num_chats parameter blank to get the entire chat history.
6321
6444
  Example:
6322
6445
  - To get the last chat, use num_chats=1
6323
6446
  - To get the last 5 chats, use num_chats=5
6324
- - To get all chats, use num_chats=None
6325
- - To get the first chat, use num_chats=None and take the first message
6447
+ - To get all chats, leave num_chats blank
6448
+
6449
+ Args:
6450
+ num_chats: The number of chats to return.
6451
+ Each chat contains 2 messages. One from the team and one from the user.
6452
+ Default: None
6453
+
6454
+ Returns:
6455
+ str: A JSON string containing a list of dictionaries representing the team chat history.
6326
6456
  """
6327
6457
  import json
6328
6458
 
@@ -6343,9 +6473,13 @@ class Team:
6343
6473
 
6344
6474
  return json.dumps(history)
6345
6475
 
6346
- return get_team_history
6476
+ if async_mode:
6477
+ get_chat_history_func = aget_chat_history
6478
+ else:
6479
+ get_chat_history_func = get_chat_history # type: ignore
6480
+ return Function.from_callable(get_chat_history_func, name="get_chat_history")
6347
6481
 
6348
- def update_session_state(self, session_state, session_state_updates: dict) -> str:
6482
+ def _update_session_state_tool(self, session_state, session_state_updates: dict) -> str:
6349
6483
  """
6350
6484
  Update the shared session state. Provide any updates as a dictionary of key-value pairs.
6351
6485
  Example:
@@ -6361,7 +6495,7 @@ class Team:
6361
6495
 
6362
6496
  def _get_previous_sessions_messages_function(
6363
6497
  self, num_history_sessions: Optional[int] = 2, user_id: Optional[str] = None
6364
- ) -> Callable:
6498
+ ):
6365
6499
  """Factory function to create a get_previous_session_messages function.
6366
6500
 
6367
6501
  Args:
@@ -6477,9 +6611,9 @@ class Team:
6477
6611
  return json.dumps([msg.to_dict() for msg in all_messages]) if all_messages else "No history found"
6478
6612
 
6479
6613
  if self._has_async_db():
6480
- return aget_previous_session_messages
6614
+ return Function.from_callable(aget_previous_session_messages, name="get_previous_session_messages")
6481
6615
  else:
6482
- return get_previous_session_messages
6616
+ return Function.from_callable(get_previous_session_messages, name="get_previous_session_messages")
6483
6617
 
6484
6618
  def _get_history_for_member_agent(self, session: TeamSession, member_agent: Union[Agent, "Team"]) -> List[Message]:
6485
6619
  from copy import deepcopy
@@ -6514,7 +6648,12 @@ class Team:
6514
6648
  return []
6515
6649
 
6516
6650
  def _determine_team_member_interactions(
6517
- self, team_run_context: Dict[str, Any], images: List[Image], videos: List[Video], audio: List[Audio]
6651
+ self,
6652
+ team_run_context: Dict[str, Any],
6653
+ images: List[Image],
6654
+ videos: List[Video],
6655
+ audio: List[Audio],
6656
+ files: List[File],
6518
6657
  ) -> Optional[str]:
6519
6658
  team_member_interactions_str = None
6520
6659
  if self.share_member_interactions:
@@ -6525,6 +6664,8 @@ class Team:
6525
6664
  videos.extend(context_videos)
6526
6665
  if context_audio := self._get_team_run_context_audio(team_run_context=team_run_context): # type: ignore
6527
6666
  audio.extend(context_audio)
6667
+ if context_files := self._get_team_run_context_files(team_run_context=team_run_context): # type: ignore
6668
+ files.extend(context_files)
6528
6669
  return team_member_interactions_str
6529
6670
 
6530
6671
  def _find_member_by_id(self, member_id: str) -> Optional[Tuple[int, Union[Agent, "Team"]]]:
@@ -6562,9 +6703,9 @@ class Team:
6562
6703
  team_run_context: Dict[str, Any],
6563
6704
  user_id: Optional[str] = None,
6564
6705
  stream: bool = False,
6565
- stream_intermediate_steps: bool = False,
6706
+ stream_events: bool = False,
6566
6707
  async_mode: bool = False,
6567
- input: Optional[Message] = None, # Used for determine_input_for_memberss=False
6708
+ input: Optional[str] = None, # Used for determine_input_for_members=False
6568
6709
  images: Optional[List[Image]] = None,
6569
6710
  videos: Optional[List[Video]] = None,
6570
6711
  audio: Optional[List[Audio]] = None,
@@ -6586,54 +6727,54 @@ class Team:
6586
6727
  if not files:
6587
6728
  files = []
6588
6729
 
6589
- def _setup_delegate_task_to_member(
6590
- member_agent: Union[Agent, "Team"], task_description: str, expected_output: Optional[str] = None
6591
- ):
6730
+ def _setup_delegate_task_to_member(member_agent: Union[Agent, "Team"], task_description: str):
6592
6731
  # 1. Initialize the member agent
6593
6732
  self._initialize_member(member_agent)
6594
6733
 
6595
- # 2. Determine team context to send
6734
+ # 2. Handle respond_directly nuances
6735
+ if self.respond_directly:
6736
+ # Since we return the response directly from the member agent, we need to set the output schema from the team down.
6737
+ if not member_agent.output_schema and self.output_schema:
6738
+ member_agent.output_schema = self.output_schema
6739
+
6740
+ # If the member will produce structured output, we need to parse the response
6741
+ if member_agent.output_schema is not None:
6742
+ self._member_response_model = member_agent.output_schema
6743
+
6744
+ # 3. Handle enable_agentic_knowledge_filters on the member agent
6745
+ if self.enable_agentic_knowledge_filters and not member_agent.enable_agentic_knowledge_filters:
6746
+ member_agent.enable_agentic_knowledge_filters = self.enable_agentic_knowledge_filters
6747
+
6748
+ # 4. Determine team context to send
6596
6749
  team_member_interactions_str = self._determine_team_member_interactions(
6597
- team_run_context, images, videos, audio
6750
+ team_run_context, images=images, videos=videos, audio=audio, files=files
6598
6751
  )
6599
6752
 
6600
- member_agent_task: Union[str, Message]
6753
+ # 5. Get the team history
6754
+ team_history_str = None
6755
+ if self.add_team_history_to_members and session:
6756
+ team_history_str = session.get_team_history_context(num_runs=self.num_team_history_runs)
6601
6757
 
6602
- # 3. Create the member agent task or use the input directly
6758
+ # 6. Create the member agent task or use the input directly
6603
6759
  if self.determine_input_for_members is False:
6604
6760
  member_agent_task = input # type: ignore
6605
6761
  else:
6606
- # Don't override the expected output of a member agent
6607
- if member_agent.expected_output is not None:
6608
- expected_output = None
6762
+ member_agent_task = task_description
6609
6763
 
6764
+ if team_history_str or team_member_interactions_str:
6610
6765
  member_agent_task = format_member_agent_task( # type: ignore
6611
- task_description, expected_output, team_member_interactions_str
6766
+ task_description=member_agent_task or "",
6767
+ team_member_interactions_str=team_member_interactions_str,
6768
+ team_history_str=team_history_str,
6612
6769
  )
6613
6770
 
6614
- # 4. Add history for the member if enabled (because we won't load the session for the member, so history won't be loaded automatically)
6771
+ # 7. Add member-level history for the member if enabled (because we won't load the session for the member, so history won't be loaded automatically)
6615
6772
  history = None
6616
- if member_agent.add_history_to_context or add_history_to_context:
6773
+ if member_agent.add_history_to_context:
6617
6774
  history = self._get_history_for_member_agent(session, member_agent)
6618
6775
  if history:
6619
6776
  if isinstance(member_agent_task, str):
6620
6777
  history.append(Message(role="user", content=member_agent_task))
6621
- else:
6622
- history.append(member_agent_task)
6623
-
6624
- # 5. Handle respond_directly
6625
- if self.respond_directly:
6626
- # Since we return the response directly from the member agent, we need to set the output schema from the team down.
6627
- if not member_agent.output_schema and self.output_schema:
6628
- member_agent.output_schema = self.output_schema
6629
-
6630
- # If the member will produce structured output, we need to parse the response
6631
- if member_agent.output_schema is not None:
6632
- self._member_response_model = member_agent.output_schema
6633
-
6634
- # 6. Handle enable_agentic_knowledge_filters on the member agent
6635
- if self.enable_agentic_knowledge_filters and not member_agent.enable_agentic_knowledge_filters:
6636
- member_agent.enable_agentic_knowledge_filters = self.enable_agentic_knowledge_filters
6637
6778
 
6638
6779
  return member_agent_task, history
6639
6780
 
@@ -6692,28 +6833,28 @@ class Team:
6692
6833
  self._update_team_media(member_agent_run_response) # type: ignore
6693
6834
 
6694
6835
  def delegate_task_to_member(
6695
- member_id: str, task_description: str, expected_output: Optional[str] = None
6836
+ member_id: str, task: str
6696
6837
  ) -> Iterator[Union[RunOutputEvent, TeamRunOutputEvent, str]]:
6697
6838
  """Use this function to delegate a task to the selected team member.
6698
6839
  You must provide a clear and concise description of the task the member should achieve AND the expected output.
6699
6840
 
6700
6841
  Args:
6701
6842
  member_id (str): The ID of the member to delegate the task to. Use only the ID of the member, not the ID of the team followed by the ID of the member.
6702
- task_description (str): A clear and concise description of the task the member should achieve.
6703
- expected_output (str, optional): The expected output from the member (optional).
6843
+ task (str): A clear and concise description of the task the member should achieve.
6704
6844
  Returns:
6705
6845
  str: The result of the delegated task.
6706
6846
  """
6707
6847
 
6708
6848
  # Find the member agent using the helper function
6709
6849
  result = self._find_member_by_id(member_id)
6710
- history = None
6711
6850
  if result is None:
6712
6851
  yield f"Member with ID {member_id} not found in the team or any subteams. Please choose the correct member from the list of members:\n\n{self.get_members_system_message_content(indent=0)}"
6713
6852
  return
6714
6853
 
6715
6854
  _, member_agent = result
6716
- member_agent_task, history = _setup_delegate_task_to_member(member_agent, task_description, expected_output)
6855
+ member_agent_task, history = _setup_delegate_task_to_member(
6856
+ member_agent=member_agent, task_description=task
6857
+ )
6717
6858
 
6718
6859
  # Make sure for the member agent, we are using the agent logger
6719
6860
  use_agent_logger()
@@ -6731,7 +6872,7 @@ class Team:
6731
6872
  audio=audio,
6732
6873
  files=files,
6733
6874
  stream=True,
6734
- stream_intermediate_steps=stream_intermediate_steps,
6875
+ stream_events=stream_events,
6735
6876
  debug_mode=debug_mode,
6736
6877
  dependencies=dependencies,
6737
6878
  add_dependencies_to_context=add_dependencies_to_context,
@@ -6818,28 +6959,28 @@ class Team:
6818
6959
  )
6819
6960
 
6820
6961
  async def adelegate_task_to_member(
6821
- member_id: str, task_description: str, expected_output: Optional[str] = None
6962
+ member_id: str, task: str
6822
6963
  ) -> AsyncIterator[Union[RunOutputEvent, TeamRunOutputEvent, str]]:
6823
6964
  """Use this function to delegate a task to the selected team member.
6824
6965
  You must provide a clear and concise description of the task the member should achieve AND the expected output.
6825
6966
 
6826
6967
  Args:
6827
6968
  member_id (str): The ID of the member to delegate the task to. Use only the ID of the member, not the ID of the team followed by the ID of the member.
6828
- task_description (str): A clear and concise description of the task the member should achieve.
6829
- expected_output (str, optional): The expected output from the member (optional).
6969
+ task (str): A clear and concise description of the task the member should achieve.
6830
6970
  Returns:
6831
6971
  str: The result of the delegated task.
6832
6972
  """
6833
6973
 
6834
6974
  # Find the member agent using the helper function
6835
6975
  result = self._find_member_by_id(member_id)
6836
- history = None
6837
6976
  if result is None:
6838
6977
  yield f"Member with ID {member_id} not found in the team or any subteams. Please choose the correct member from the list of members:\n\n{self.get_members_system_message_content(indent=0)}"
6839
6978
  return
6840
6979
 
6841
6980
  _, member_agent = result
6842
- member_agent_task, history = _setup_delegate_task_to_member(member_agent, task_description, expected_output)
6981
+ member_agent_task, history = _setup_delegate_task_to_member(
6982
+ member_agent=member_agent, task_description=task
6983
+ )
6843
6984
 
6844
6985
  # Make sure for the member agent, we are using the agent logger
6845
6986
  use_agent_logger()
@@ -6857,7 +6998,7 @@ class Team:
6857
6998
  audio=audio,
6858
6999
  files=files,
6859
7000
  stream=True,
6860
- stream_intermediate_steps=stream_intermediate_steps,
7001
+ stream_events=stream_events,
6861
7002
  debug_mode=debug_mode,
6862
7003
  dependencies=dependencies,
6863
7004
  add_dependencies_to_context=add_dependencies_to_context,
@@ -6940,16 +7081,13 @@ class Team:
6940
7081
  )
6941
7082
 
6942
7083
  # When the task should be delegated to all members
6943
- def delegate_task_to_members(
6944
- task_description: str, expected_output: Optional[str] = None
6945
- ) -> Iterator[Union[RunOutputEvent, TeamRunOutputEvent, str]]:
7084
+ def delegate_task_to_members(task: str) -> Iterator[Union[RunOutputEvent, TeamRunOutputEvent, str]]:
6946
7085
  """
6947
7086
  Use this function to delegate a task to all the member agents and return a response.
6948
7087
  You must provide a clear and concise description of the task the member should achieve AND the expected output.
6949
7088
 
6950
7089
  Args:
6951
- task_description (str): A clear and concise description of the task to send to member agents.
6952
- expected_output (str, optional): The expected output from the member agents (optional).
7090
+ task (str): A clear and concise description of the task to send to member agents.
6953
7091
  Returns:
6954
7092
  str: The result of the delegated task.
6955
7093
  """
@@ -6957,7 +7095,7 @@ class Team:
6957
7095
  # Run all the members sequentially
6958
7096
  for _, member_agent in enumerate(self.members):
6959
7097
  member_agent_task, history = _setup_delegate_task_to_member(
6960
- member_agent, task_description, expected_output
7098
+ member_agent=member_agent, task_description=task
6961
7099
  )
6962
7100
 
6963
7101
  member_session_state_copy = copy(session_state)
@@ -6973,7 +7111,7 @@ class Team:
6973
7111
  audio=audio,
6974
7112
  files=files,
6975
7113
  stream=True,
6976
- stream_intermediate_steps=stream_intermediate_steps,
7114
+ stream_events=stream_events,
6977
7115
  knowledge_filters=knowledge_filters
6978
7116
  if not member_agent.knowledge_filters and member_agent.knowledge
6979
7117
  else None,
@@ -7055,15 +7193,12 @@ class Team:
7055
7193
  use_team_logger()
7056
7194
 
7057
7195
  # When the task should be delegated to all members
7058
- async def adelegate_task_to_members(
7059
- task_description: str, expected_output: Optional[str] = None
7060
- ) -> AsyncIterator[Union[RunOutputEvent, TeamRunOutputEvent, str]]:
7196
+ async def adelegate_task_to_members(task: str) -> AsyncIterator[Union[RunOutputEvent, TeamRunOutputEvent, str]]:
7061
7197
  """Use this function to delegate a task to all the member agents and return a response.
7062
- You must provide a clear and concise description of the task to send to member agents AND the expected output.
7198
+ You must provide a clear and concise description of the task to send to member agents.
7063
7199
 
7064
7200
  Args:
7065
- task_description (str): A clear and concise description of the task to send to member agents.
7066
- expected_output (str, optional): The expected output from the member agents (optional).
7201
+ task (str): A clear and concise description of the task to send to member agents.
7067
7202
  Returns:
7068
7203
  str: The result of the delegated task.
7069
7204
  """
@@ -7075,8 +7210,8 @@ class Team:
7075
7210
 
7076
7211
  async def stream_member(agent: Union[Agent, "Team"]) -> None:
7077
7212
  member_agent_task, history = _setup_delegate_task_to_member(
7078
- agent, task_description, expected_output
7079
- )
7213
+ member_agent=agent, task_description=task
7214
+ ) # type: ignore
7080
7215
  member_session_state_copy = copy(session_state)
7081
7216
 
7082
7217
  member_stream = agent.arun( # type: ignore
@@ -7089,7 +7224,7 @@ class Team:
7089
7224
  audio=audio,
7090
7225
  files=files,
7091
7226
  stream=True,
7092
- stream_intermediate_steps=stream_intermediate_steps,
7227
+ stream_events=stream_events,
7093
7228
  debug_mode=debug_mode,
7094
7229
  knowledge_filters=knowledge_filters
7095
7230
  if not member_agent.knowledge_filters and member_agent.knowledge
@@ -7150,7 +7285,7 @@ class Team:
7150
7285
  for member_agent_index, member_agent in enumerate(self.members):
7151
7286
  current_agent = member_agent
7152
7287
  member_agent_task, history = _setup_delegate_task_to_member(
7153
- current_agent, task_description, expected_output
7288
+ member_agent=current_agent, task_description=task
7154
7289
  )
7155
7290
 
7156
7291
  async def run_member_agent(agent=current_agent) -> str:
@@ -7166,7 +7301,7 @@ class Team:
7166
7301
  audio=audio,
7167
7302
  files=files,
7168
7303
  stream=False,
7169
- stream_intermediate_steps=stream_intermediate_steps,
7304
+ stream_events=stream_events,
7170
7305
  debug_mode=debug_mode,
7171
7306
  knowledge_filters=knowledge_filters
7172
7307
  if not member_agent.knowledge_filters and member_agent.knowledge
@@ -7240,24 +7375,28 @@ class Team:
7240
7375
  ###########################################################################
7241
7376
  # Session Management
7242
7377
  ###########################################################################
7243
- def _read_session(self, session_id: str) -> Optional[TeamSession]:
7378
+ def _read_session(
7379
+ self, session_id: str, session_type: SessionType = SessionType.TEAM
7380
+ ) -> Optional[Union[TeamSession, WorkflowSession]]:
7244
7381
  """Get a Session from the database."""
7245
7382
  try:
7246
7383
  if not self.db:
7247
7384
  raise ValueError("Db not initialized")
7248
- session = self.db.get_session(session_id=session_id, session_type=SessionType.TEAM)
7385
+ session = self.db.get_session(session_id=session_id, session_type=session_type)
7249
7386
  return session # type: ignore
7250
7387
  except Exception as e:
7251
7388
  log_warning(f"Error getting session from db: {e}")
7252
7389
  return None
7253
7390
 
7254
- async def _aread_session(self, session_id: str) -> Optional[TeamSession]:
7391
+ async def _aread_session(
7392
+ self, session_id: str, session_type: SessionType = SessionType.TEAM
7393
+ ) -> Optional[Union[TeamSession, WorkflowSession]]:
7255
7394
  """Get a Session from the database."""
7256
7395
  try:
7257
7396
  if not self.db:
7258
7397
  raise ValueError("Db not initialized")
7259
7398
  self.db = cast(AsyncBaseDb, self.db)
7260
- session = await self.db.get_session(session_id=session_id, session_type=SessionType.TEAM)
7399
+ session = await self.db.get_session(session_id=session_id, session_type=session_type)
7261
7400
  return session # type: ignore
7262
7401
  except Exception as e:
7263
7402
  log_warning(f"Error getting session from db: {e}")
@@ -7358,12 +7497,17 @@ class Team:
7358
7497
  # Create new session if none found
7359
7498
  if team_session is None:
7360
7499
  log_debug(f"Creating new TeamSession: {session_id}")
7500
+ session_data = {}
7501
+ if self.session_state is not None:
7502
+ from copy import deepcopy
7503
+
7504
+ session_data["session_state"] = deepcopy(self.session_state)
7361
7505
  team_session = TeamSession(
7362
7506
  session_id=session_id,
7363
7507
  team_id=self.id,
7364
7508
  user_id=user_id,
7365
7509
  team_data=self._get_team_data(),
7366
- session_data={},
7510
+ session_data=session_data,
7367
7511
  metadata=self.metadata,
7368
7512
  created_at=int(time()),
7369
7513
  )
@@ -7439,7 +7583,48 @@ class Team:
7439
7583
 
7440
7584
  # Load and return the session from the database
7441
7585
  if self.db is not None:
7442
- team_session = cast(TeamSession, self._read_session(session_id=session_id_to_load)) # type: ignore
7586
+ loaded_session = None
7587
+ # We have a standalone team, so we are loading a TeamSession
7588
+ if self.workflow_id is None:
7589
+ loaded_session = cast(TeamSession, self._read_session(session_id=session_id_to_load)) # type: ignore
7590
+ # We have a workflow team, so we are loading a WorkflowSession
7591
+ else:
7592
+ loaded_session = cast(WorkflowSession, self._read_session(session_id=session_id_to_load)) # type: ignore
7593
+
7594
+ # Cache the session if relevant
7595
+ if loaded_session is not None and self.cache_session:
7596
+ self._agent_session = loaded_session
7597
+
7598
+ return loaded_session
7599
+
7600
+ log_debug(f"TeamSession {session_id_to_load} not found in db")
7601
+ return None
7602
+
7603
+ async def aget_session(
7604
+ self,
7605
+ session_id: Optional[str] = None,
7606
+ ) -> Optional[TeamSession]:
7607
+ """Load an TeamSession from database.
7608
+
7609
+ Args:
7610
+ session_id: The session_id to load from storage.
7611
+
7612
+ Returns:
7613
+ TeamSession: The TeamSession loaded from the database or created if it does not exist.
7614
+ """
7615
+ if not session_id and not self.session_id:
7616
+ return None
7617
+
7618
+ session_id_to_load = session_id or self.session_id
7619
+
7620
+ # If there is a cached session, return it
7621
+ if self.cache_session and hasattr(self, "_team_session") and self._team_session is not None:
7622
+ if self._team_session.session_id == session_id_to_load:
7623
+ return self._team_session
7624
+
7625
+ # Load and return the session from the database
7626
+ if self.db is not None:
7627
+ team_session = cast(TeamSession, await self._aread_session(session_id=session_id_to_load)) # type: ignore
7443
7628
 
7444
7629
  # Cache the session if relevant
7445
7630
  if team_session is not None and self.cache_session:
@@ -7478,7 +7663,17 @@ class Team:
7478
7663
  session.session_data["session_state"].pop("current_session_id", None) # type: ignore
7479
7664
  session.session_data["session_state"].pop("current_user_id", None) # type: ignore
7480
7665
  session.session_data["session_state"].pop("current_run_id", None) # type: ignore
7481
- await self._aupsert_session(session=session)
7666
+
7667
+ # scrub the member responses if not storing them
7668
+ if not self.store_member_responses and session.runs is not None:
7669
+ for run in session.runs:
7670
+ if hasattr(run, "member_responses"):
7671
+ run.member_responses = []
7672
+
7673
+ if self._has_async_db():
7674
+ await self._aupsert_session(session=session)
7675
+ else:
7676
+ self._upsert_session(session=session)
7482
7677
  log_debug(f"Created or updated TeamSession record: {session.session_id}")
7483
7678
 
7484
7679
  def _load_session_state(self, session: TeamSession, session_state: Dict[str, Any]) -> Dict[str, Any]:
@@ -7608,6 +7803,60 @@ class Team:
7608
7803
  raise Exception("Session not found")
7609
7804
  return session.session_data.get("session_state", {}) if session.session_data is not None else {}
7610
7805
 
7806
+ def update_session_state(self, session_state_updates: Dict[str, Any], session_id: Optional[str] = None) -> str:
7807
+ """
7808
+ Update the session state for the given session ID and user ID.
7809
+ Args:
7810
+ session_state_updates: The updates to apply to the session state. Should be a dictionary of key-value pairs.
7811
+ session_id: The session ID to update. If not provided, the current cached session ID is used.
7812
+ Returns:
7813
+ dict: The updated session state.
7814
+ """
7815
+ session_id = session_id or self.session_id
7816
+ if session_id is None:
7817
+ raise Exception("Session ID is not set")
7818
+ session = self.get_session(session_id=session_id) # type: ignore
7819
+ if session is None:
7820
+ raise Exception("Session not found")
7821
+
7822
+ if session.session_data is not None and "session_state" not in session.session_data:
7823
+ session.session_data["session_state"] = {}
7824
+
7825
+ for key, value in session_state_updates.items():
7826
+ session.session_data["session_state"][key] = value # type: ignore
7827
+
7828
+ self.save_session(session=session)
7829
+
7830
+ return session.session_data["session_state"] # type: ignore
7831
+
7832
+ async def aupdate_session_state(
7833
+ self, session_state_updates: Dict[str, Any], session_id: Optional[str] = None
7834
+ ) -> str:
7835
+ """
7836
+ Update the session state for the given session ID and user ID.
7837
+ Args:
7838
+ session_state_updates: The updates to apply to the session state. Should be a dictionary of key-value pairs.
7839
+ session_id: The session ID to update. If not provided, the current cached session ID is used.
7840
+ Returns:
7841
+ dict: The updated session state.
7842
+ """
7843
+ session_id = session_id or self.session_id
7844
+ if session_id is None:
7845
+ raise Exception("Session ID is not set")
7846
+ session = await self.aget_session(session_id=session_id) # type: ignore
7847
+ if session is None:
7848
+ raise Exception("Session not found")
7849
+
7850
+ if session.session_data is not None and "session_state" not in session.session_data:
7851
+ session.session_data["session_state"] = {}
7852
+
7853
+ for key, value in session_state_updates.items():
7854
+ session.session_data["session_state"][key] = value # type: ignore
7855
+
7856
+ await self.asave_session(session=session)
7857
+
7858
+ return session.session_data["session_state"] # type: ignore
7859
+
7611
7860
  def get_session_metrics(self, session_id: Optional[str] = None) -> Optional[Metrics]:
7612
7861
  """Get the session metrics for the given session ID and user ID."""
7613
7862
  session_id = session_id or self.session_id
@@ -7708,7 +7957,9 @@ class Team:
7708
7957
  return ""
7709
7958
  team_member_interactions_str = ""
7710
7959
  if "member_responses" in team_run_context:
7711
- team_member_interactions_str += "<member interactions>\n"
7960
+ team_member_interactions_str += (
7961
+ "<member_interaction_context>\nSee below interactions wit other team members.\n"
7962
+ )
7712
7963
 
7713
7964
  for interaction in team_run_context["member_responses"]:
7714
7965
  response_dict = interaction["run_response"].to_dict()
@@ -7721,7 +7972,7 @@ class Team:
7721
7972
  team_member_interactions_str += f"Task: {interaction['task']}\n"
7722
7973
  team_member_interactions_str += f"Response: {response_content}\n"
7723
7974
  team_member_interactions_str += "\n"
7724
- team_member_interactions_str += "</member interactions>\n"
7975
+ team_member_interactions_str += "</member_interaction_context>\n"
7725
7976
  return team_member_interactions_str
7726
7977
 
7727
7978
  def _get_team_run_context_images(self, team_run_context: Dict[str, Any]) -> List[Image]:
@@ -7754,6 +8005,16 @@ class Team:
7754
8005
  audio.extend(interaction["run_response"].audio)
7755
8006
  return audio
7756
8007
 
8008
+ def _get_team_run_context_files(self, team_run_context: Dict[str, Any]) -> List[File]:
8009
+ if not team_run_context:
8010
+ return []
8011
+ files = []
8012
+ if "member_responses" in team_run_context:
8013
+ for interaction in team_run_context["member_responses"]:
8014
+ if interaction["run_response"].files:
8015
+ files.extend(interaction["run_response"].files)
8016
+ return files
8017
+
7757
8018
  ###########################################################################
7758
8019
  # Handle images, videos and audio
7759
8020
  ###########################################################################