agno 2.0.10__py3-none-any.whl → 2.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85) hide show
  1. agno/agent/agent.py +608 -175
  2. agno/db/in_memory/in_memory_db.py +42 -29
  3. agno/db/postgres/postgres.py +6 -4
  4. agno/exceptions.py +62 -1
  5. agno/guardrails/__init__.py +6 -0
  6. agno/guardrails/base.py +19 -0
  7. agno/guardrails/openai.py +144 -0
  8. agno/guardrails/pii.py +94 -0
  9. agno/guardrails/prompt_injection.py +51 -0
  10. agno/knowledge/embedder/aws_bedrock.py +9 -4
  11. agno/knowledge/embedder/azure_openai.py +54 -0
  12. agno/knowledge/embedder/base.py +2 -0
  13. agno/knowledge/embedder/cohere.py +184 -5
  14. agno/knowledge/embedder/google.py +79 -1
  15. agno/knowledge/embedder/huggingface.py +9 -4
  16. agno/knowledge/embedder/jina.py +63 -0
  17. agno/knowledge/embedder/mistral.py +78 -11
  18. agno/knowledge/embedder/ollama.py +5 -0
  19. agno/knowledge/embedder/openai.py +18 -54
  20. agno/knowledge/embedder/voyageai.py +69 -16
  21. agno/knowledge/knowledge.py +5 -4
  22. agno/knowledge/reader/pdf_reader.py +4 -3
  23. agno/knowledge/reader/website_reader.py +3 -2
  24. agno/models/base.py +125 -32
  25. agno/models/cerebras/cerebras.py +1 -0
  26. agno/models/cerebras/cerebras_openai.py +1 -0
  27. agno/models/dashscope/dashscope.py +1 -0
  28. agno/models/google/gemini.py +27 -5
  29. agno/models/litellm/chat.py +17 -0
  30. agno/models/openai/chat.py +13 -4
  31. agno/models/perplexity/perplexity.py +2 -3
  32. agno/models/requesty/__init__.py +5 -0
  33. agno/models/requesty/requesty.py +49 -0
  34. agno/models/vllm/vllm.py +1 -0
  35. agno/models/xai/xai.py +1 -0
  36. agno/os/app.py +167 -148
  37. agno/os/interfaces/whatsapp/router.py +2 -0
  38. agno/os/mcp.py +1 -1
  39. agno/os/middleware/__init__.py +7 -0
  40. agno/os/middleware/jwt.py +233 -0
  41. agno/os/router.py +181 -45
  42. agno/os/routers/home.py +2 -2
  43. agno/os/routers/memory/memory.py +23 -1
  44. agno/os/routers/memory/schemas.py +1 -1
  45. agno/os/routers/session/session.py +20 -3
  46. agno/os/utils.py +172 -8
  47. agno/run/agent.py +120 -77
  48. agno/run/team.py +115 -72
  49. agno/run/workflow.py +5 -15
  50. agno/session/summary.py +9 -10
  51. agno/session/team.py +2 -1
  52. agno/team/team.py +720 -168
  53. agno/tools/firecrawl.py +4 -4
  54. agno/tools/function.py +42 -2
  55. agno/tools/knowledge.py +3 -3
  56. agno/tools/searxng.py +2 -2
  57. agno/tools/serper.py +2 -2
  58. agno/tools/spider.py +2 -2
  59. agno/tools/workflow.py +4 -5
  60. agno/utils/events.py +66 -1
  61. agno/utils/hooks.py +57 -0
  62. agno/utils/media.py +11 -9
  63. agno/utils/print_response/agent.py +43 -5
  64. agno/utils/print_response/team.py +48 -12
  65. agno/vectordb/cassandra/cassandra.py +44 -4
  66. agno/vectordb/chroma/chromadb.py +79 -8
  67. agno/vectordb/clickhouse/clickhousedb.py +43 -6
  68. agno/vectordb/couchbase/couchbase.py +76 -5
  69. agno/vectordb/lancedb/lance_db.py +38 -3
  70. agno/vectordb/llamaindex/__init__.py +3 -0
  71. agno/vectordb/milvus/milvus.py +76 -4
  72. agno/vectordb/mongodb/mongodb.py +76 -4
  73. agno/vectordb/pgvector/pgvector.py +50 -6
  74. agno/vectordb/pineconedb/pineconedb.py +39 -2
  75. agno/vectordb/qdrant/qdrant.py +76 -26
  76. agno/vectordb/singlestore/singlestore.py +77 -4
  77. agno/vectordb/upstashdb/upstashdb.py +42 -2
  78. agno/vectordb/weaviate/weaviate.py +39 -3
  79. agno/workflow/types.py +1 -0
  80. agno/workflow/workflow.py +58 -2
  81. {agno-2.0.10.dist-info → agno-2.1.0.dist-info}/METADATA +4 -3
  82. {agno-2.0.10.dist-info → agno-2.1.0.dist-info}/RECORD +85 -75
  83. {agno-2.0.10.dist-info → agno-2.1.0.dist-info}/WHEEL +0 -0
  84. {agno-2.0.10.dist-info → agno-2.1.0.dist-info}/licenses/LICENSE +0 -0
  85. {agno-2.0.10.dist-info → agno-2.1.0.dist-info}/top_level.txt +0 -0
agno/team/team.py CHANGED
@@ -31,7 +31,13 @@ from pydantic import BaseModel
31
31
 
32
32
  from agno.agent import Agent
33
33
  from agno.db.base import BaseDb, SessionType, UserMemory
34
- from agno.exceptions import ModelProviderError, RunCancelledException
34
+ from agno.exceptions import (
35
+ InputCheckError,
36
+ ModelProviderError,
37
+ OutputCheckError,
38
+ RunCancelledException,
39
+ )
40
+ from agno.guardrails import BaseGuardrail
35
41
  from agno.knowledge.knowledge import Knowledge
36
42
  from agno.knowledge.types import KnowledgeFilter
37
43
  from agno.media import Audio, File, Image, Video
@@ -62,6 +68,8 @@ from agno.utils.events import (
62
68
  create_team_memory_update_started_event,
63
69
  create_team_parser_model_response_completed_event,
64
70
  create_team_parser_model_response_started_event,
71
+ create_team_pre_hook_completed_event,
72
+ create_team_pre_hook_started_event,
65
73
  create_team_reasoning_completed_event,
66
74
  create_team_reasoning_started_event,
67
75
  create_team_reasoning_step_event,
@@ -73,6 +81,7 @@ from agno.utils.events import (
73
81
  create_team_tool_call_completed_event,
74
82
  create_team_tool_call_started_event,
75
83
  )
84
+ from agno.utils.hooks import filter_hook_args, normalize_hooks
76
85
  from agno.utils.knowledge import get_agentic_or_user_search_filters
77
86
  from agno.utils.log import (
78
87
  log_debug,
@@ -161,6 +170,11 @@ class Team:
161
170
  # If True, cache the current Team session in memory for faster access
162
171
  cache_session: bool = False
163
172
 
173
+ # If True, allow searching through previous sessions
174
+ search_session_history: Optional[bool] = False
175
+ # Number of past sessions to include in the search
176
+ num_history_sessions: Optional[int] = None
177
+
164
178
  # If True, resolve the session_state, dependencies, and metadata in the user and system messages
165
179
  resolve_in_context: bool = True
166
180
 
@@ -263,6 +277,12 @@ class Team:
263
277
  # A list of hooks to be called before and after the tool call
264
278
  tool_hooks: Optional[List[Callable]] = None
265
279
 
280
+ # --- Team Hooks ---
281
+ # Functions called right after team session is loaded, before processing starts
282
+ pre_hooks: Optional[Union[List[Callable[..., Any]], List[BaseGuardrail]]] = None
283
+ # Functions called after output is generated but before the response is returned
284
+ post_hooks: Optional[Union[List[Callable[..., Any]], List[BaseGuardrail]]] = None
285
+
266
286
  # --- Structured output ---
267
287
  # Input schema for validating input
268
288
  input_schema: Optional[Type[BaseModel]] = None
@@ -369,6 +389,8 @@ class Team:
369
389
  overwrite_db_session_state: bool = False,
370
390
  resolve_in_context: bool = True,
371
391
  cache_session: bool = False,
392
+ search_session_history: Optional[bool] = False,
393
+ num_history_sessions: Optional[int] = None,
372
394
  description: Optional[str] = None,
373
395
  instructions: Optional[Union[str, List[str], Callable]] = None,
374
396
  expected_output: Optional[str] = None,
@@ -401,6 +423,8 @@ class Team:
401
423
  tool_call_limit: Optional[int] = None,
402
424
  tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
403
425
  tool_hooks: Optional[List[Callable]] = None,
426
+ pre_hooks: Optional[Union[List[Callable[..., Any]], List[BaseGuardrail]]] = None,
427
+ post_hooks: Optional[Union[List[Callable[..., Any]], List[BaseGuardrail]]] = None,
404
428
  input_schema: Optional[Type[BaseModel]] = None,
405
429
  output_schema: Optional[Type[BaseModel]] = None,
406
430
  parser_model: Optional[Model] = None,
@@ -460,6 +484,9 @@ class Team:
460
484
  self.resolve_in_context = resolve_in_context
461
485
  self.cache_session = cache_session
462
486
 
487
+ self.search_session_history = search_session_history
488
+ self.num_history_sessions = num_history_sessions
489
+
463
490
  self.description = description
464
491
  self.instructions = instructions
465
492
  self.expected_output = expected_output
@@ -498,6 +525,10 @@ class Team:
498
525
  self.tool_call_limit = tool_call_limit
499
526
  self.tool_hooks = tool_hooks
500
527
 
528
+ # Initialize hooks with backward compatibility
529
+ self.pre_hooks = pre_hooks
530
+ self.post_hooks = post_hooks
531
+
501
532
  self.input_schema = input_schema
502
533
  self.output_schema = output_schema
503
534
  self.parser_model = parser_model
@@ -579,6 +610,8 @@ class Team:
579
610
 
580
611
  self._rebuild_tools = True
581
612
 
613
+ self._hooks_normalised = False
614
+
582
615
  @property
583
616
  def should_parse_structured_output(self) -> bool:
584
617
  return self.output_schema is not None and self.parse_response and self.parser_model is None
@@ -811,36 +844,304 @@ class Team:
811
844
  """
812
845
  return cancel_run_global(run_id)
813
846
 
847
+ def _execute_pre_hooks(
848
+ self,
849
+ hooks: Optional[List[Callable[..., Any]]],
850
+ run_response: TeamRunOutput,
851
+ run_input: TeamRunInput,
852
+ session: TeamSession,
853
+ user_id: Optional[str] = None,
854
+ debug_mode: Optional[bool] = None,
855
+ **kwargs: Any,
856
+ ) -> Iterator[TeamRunOutputEvent]:
857
+ """Execute multiple pre-hook functions in succession."""
858
+ if hooks is None:
859
+ return
860
+
861
+ # Prepare all possible arguments once
862
+ all_args = {
863
+ "run_input": run_input,
864
+ "team": self,
865
+ "session": session,
866
+ "user_id": user_id,
867
+ "debug_mode": debug_mode or self.debug_mode,
868
+ }
869
+ all_args.update(kwargs)
870
+
871
+ for i, hook in enumerate(hooks):
872
+ yield self._handle_event(
873
+ run_response=run_response,
874
+ event=create_team_pre_hook_started_event(
875
+ from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
876
+ ),
877
+ )
878
+ try:
879
+ # Filter arguments to only include those that the hook accepts
880
+ filtered_args = filter_hook_args(hook, all_args)
881
+
882
+ hook(**filtered_args)
883
+
884
+ yield self._handle_event(
885
+ run_response=run_response,
886
+ event=create_team_pre_hook_completed_event(
887
+ from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
888
+ ),
889
+ )
890
+
891
+ except (InputCheckError, OutputCheckError) as e:
892
+ raise e
893
+ except Exception as e:
894
+ log_error(f"Pre-hook #{i + 1} execution failed: {str(e)}")
895
+ log_exception(e)
896
+ finally:
897
+ # Reset global log mode incase an agent in the pre-hook changed it
898
+ self._set_debug(debug_mode=debug_mode)
899
+
900
+ # Update the input on the run_response
901
+ run_response.input = run_input
902
+
903
+ async def _aexecute_pre_hooks(
904
+ self,
905
+ hooks: Optional[List[Callable[..., Any]]],
906
+ run_response: TeamRunOutput,
907
+ run_input: TeamRunInput,
908
+ session: TeamSession,
909
+ user_id: Optional[str] = None,
910
+ debug_mode: Optional[bool] = None,
911
+ **kwargs: Any,
912
+ ) -> AsyncIterator[TeamRunOutputEvent]:
913
+ """Execute multiple pre-hook functions in succession (async version)."""
914
+ if hooks is None:
915
+ return
916
+
917
+ # Prepare all possible arguments once
918
+ all_args = {
919
+ "run_input": run_input,
920
+ "team": self,
921
+ "session": session,
922
+ "user_id": user_id,
923
+ "debug_mode": debug_mode or self.debug_mode,
924
+ }
925
+ all_args.update(kwargs)
926
+
927
+ for i, hook in enumerate(hooks):
928
+ yield self._handle_event(
929
+ run_response=run_response,
930
+ event=create_team_pre_hook_started_event(
931
+ from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
932
+ ),
933
+ )
934
+ try:
935
+ # Filter arguments to only include those that the hook accepts
936
+ filtered_args = filter_hook_args(hook, all_args)
937
+
938
+ if asyncio.iscoroutinefunction(hook):
939
+ await hook(**filtered_args)
940
+ else:
941
+ # Synchronous function
942
+ hook(**filtered_args)
943
+
944
+ yield self._handle_event(
945
+ run_response=run_response,
946
+ event=create_team_pre_hook_completed_event(
947
+ from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
948
+ ),
949
+ )
950
+
951
+ except (InputCheckError, OutputCheckError) as e:
952
+ raise e
953
+ except Exception as e:
954
+ log_error(f"Pre-hook #{i + 1} execution failed: {str(e)}")
955
+ log_exception(e)
956
+ finally:
957
+ # Reset global log mode incase an agent in the pre-hook changed it
958
+ self._set_debug(debug_mode=debug_mode)
959
+
960
+ # Update the input on the run_response
961
+ run_response.input = run_input
962
+
963
+ def _execute_post_hooks(
964
+ self,
965
+ hooks: Optional[List[Callable[..., Any]]],
966
+ run_output: TeamRunOutput,
967
+ session: TeamSession,
968
+ user_id: Optional[str] = None,
969
+ debug_mode: Optional[bool] = None,
970
+ **kwargs: Any,
971
+ ) -> None:
972
+ """Execute multiple post-hook functions in succession."""
973
+ if hooks is None:
974
+ return
975
+
976
+ # Prepare all possible arguments once
977
+ all_args = {
978
+ "run_output": run_output,
979
+ "team": self,
980
+ "session": session,
981
+ "user_id": user_id,
982
+ "debug_mode": debug_mode or self.debug_mode,
983
+ }
984
+ all_args.update(kwargs)
985
+
986
+ for i, hook in enumerate(hooks):
987
+ try:
988
+ # Filter arguments to only include those that the hook accepts
989
+ filtered_args = filter_hook_args(hook, all_args)
990
+
991
+ hook(**filtered_args)
992
+
993
+ except (InputCheckError, OutputCheckError) as e:
994
+ raise e
995
+ except Exception as e:
996
+ log_error(f"Post-hook #{i + 1} execution failed: {str(e)}")
997
+ log_exception(e)
998
+
999
+ async def _aexecute_post_hooks(
1000
+ self,
1001
+ hooks: Optional[List[Callable[..., Any]]],
1002
+ run_output: TeamRunOutput,
1003
+ session: TeamSession,
1004
+ user_id: Optional[str] = None,
1005
+ debug_mode: Optional[bool] = None,
1006
+ **kwargs: Any,
1007
+ ) -> None:
1008
+ """Execute multiple post-hook functions in succession (async version)."""
1009
+ if hooks is None:
1010
+ return
1011
+
1012
+ # Prepare all possible arguments once
1013
+ all_args = {
1014
+ "run_output": run_output,
1015
+ "team": self,
1016
+ "session": session,
1017
+ "user_id": user_id,
1018
+ "debug_mode": debug_mode or self.debug_mode,
1019
+ }
1020
+ all_args.update(kwargs)
1021
+
1022
+ for i, hook in enumerate(hooks):
1023
+ try:
1024
+ # Filter arguments to only include those that the hook accepts
1025
+ filtered_args = filter_hook_args(hook, all_args)
1026
+
1027
+ if asyncio.iscoroutinefunction(hook):
1028
+ await hook(**filtered_args)
1029
+ else:
1030
+ hook(**filtered_args)
1031
+
1032
+ except (InputCheckError, OutputCheckError) as e:
1033
+ raise e
1034
+ except Exception as e:
1035
+ log_error(f"Post-hook #{i + 1} execution failed: {str(e)}")
1036
+ log_exception(e)
1037
+
814
1038
  def _run(
815
1039
  self,
816
1040
  run_response: TeamRunOutput,
817
- run_messages: RunMessages,
818
1041
  session: TeamSession,
1042
+ session_state: Dict[str, Any],
819
1043
  user_id: Optional[str] = None,
1044
+ knowledge_filters: Optional[Dict[str, Any]] = None,
1045
+ add_history_to_context: Optional[bool] = None,
1046
+ add_dependencies_to_context: Optional[bool] = None,
1047
+ add_session_state_to_context: Optional[bool] = None,
1048
+ metadata: Optional[Dict[str, Any]] = None,
820
1049
  response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
1050
+ dependencies: Optional[Dict[str, Any]] = None,
1051
+ debug_mode: Optional[bool] = None,
1052
+ **kwargs: Any,
821
1053
  ) -> TeamRunOutput:
822
1054
  """Run the Team and return the response.
823
1055
 
824
1056
  Steps:
825
- 1. Reason about the task(s) if reasoning is enabled
826
- 2. Get a response from the model
827
- 3. Update Team Memory
828
- 4. Add RunOutput to Team Session
829
- 5. Calculate session metrics
830
- 6. Save session to storage
1057
+ 1. Execute pre-hooks
1058
+ 2. Get run messages
1059
+ 3. Reason about the task(s) if reasoning is enabled
1060
+ 4. Get a response from the model
1061
+ 5. Update TeamRunOutput
1062
+ 6. Execute post-hooks
1063
+ 7. Add RunOutput to Team Session
1064
+ 8. Calculate session metrics
1065
+ 9. Update Team Memory
1066
+ 10. Save session to storage
831
1067
  """
832
- log_debug(f"Team Run Start: {run_response.run_id}", center=True)
833
1068
 
834
1069
  # Register run for cancellation tracking
835
1070
  register_run(run_response.run_id) # type: ignore
836
1071
 
837
- # 1. Reason about the task(s) if reasoning is enabled
1072
+ # 1. Execute pre-hooks
1073
+ run_input = cast(TeamRunInput, run_response.input)
1074
+ self.model = cast(Model, self.model)
1075
+ if self.pre_hooks is not None:
1076
+ # Can modify the run input
1077
+ pre_hook_iterator = self._execute_pre_hooks(
1078
+ hooks=self.pre_hooks, # type: ignore
1079
+ run_response=run_response,
1080
+ run_input=run_input,
1081
+ session=session,
1082
+ user_id=user_id,
1083
+ debug_mode=debug_mode,
1084
+ **kwargs,
1085
+ )
1086
+ # Consume the generator without yielding
1087
+ deque(pre_hook_iterator, maxlen=0)
1088
+
1089
+ # Initialize team run context
1090
+ team_run_context: Dict[str, Any] = {}
1091
+
1092
+ self.determine_tools_for_model(
1093
+ model=self.model,
1094
+ run_response=run_response,
1095
+ team_run_context=team_run_context,
1096
+ session=session,
1097
+ session_state=session_state,
1098
+ user_id=user_id,
1099
+ async_mode=False,
1100
+ knowledge_filters=knowledge_filters,
1101
+ input_message=run_input.input_content,
1102
+ images=run_input.images,
1103
+ videos=run_input.videos,
1104
+ audio=run_input.audios,
1105
+ files=run_input.files,
1106
+ debug_mode=debug_mode,
1107
+ add_history_to_context=add_history_to_context,
1108
+ add_session_state_to_context=add_session_state_to_context,
1109
+ dependencies=dependencies,
1110
+ add_dependencies_to_context=add_dependencies_to_context,
1111
+ metadata=metadata,
1112
+ )
1113
+
1114
+ # 2. Prepare run messages
1115
+ run_messages: RunMessages = self._get_run_messages(
1116
+ run_response=run_response,
1117
+ session=session,
1118
+ session_state=session_state,
1119
+ user_id=user_id,
1120
+ input_message=run_input.input_content,
1121
+ audio=run_input.audios,
1122
+ images=run_input.images,
1123
+ videos=run_input.videos,
1124
+ files=run_input.files,
1125
+ knowledge_filters=knowledge_filters,
1126
+ add_history_to_context=add_history_to_context,
1127
+ dependencies=dependencies,
1128
+ add_dependencies_to_context=add_dependencies_to_context,
1129
+ add_session_state_to_context=add_session_state_to_context,
1130
+ metadata=metadata,
1131
+ **kwargs,
1132
+ )
1133
+ if len(run_messages.messages) == 0:
1134
+ log_error("No messages to be sent to the model.")
1135
+
1136
+ log_debug(f"Team Run Start: {run_response.run_id}", center=True)
1137
+
1138
+ # 3. Reason about the task(s) if reasoning is enabled
838
1139
  self._handle_reasoning(run_response=run_response, run_messages=run_messages)
839
1140
 
840
1141
  # Check for cancellation before model call
841
1142
  raise_if_cancelled(run_response.run_id) # type: ignore
842
1143
 
843
- # 2. Get the model response for the team leader
1144
+ # 4. Get the model response for the team leader
844
1145
  self.model = cast(Model, self.model)
845
1146
  model_response: ModelResponse = self.model.response(
846
1147
  messages=run_messages.messages,
@@ -861,7 +1162,7 @@ class Team:
861
1162
  # If a parser model is provided, structure the response separately
862
1163
  self._parse_response_with_parser_model(model_response, run_messages)
863
1164
 
864
- # Update TeamRunOutput
1165
+ # 5. Update TeamRunOutput
865
1166
  self._update_run_response(model_response=model_response, run_response=run_response, run_messages=run_messages)
866
1167
 
867
1168
  if self.store_media:
@@ -874,10 +1175,28 @@ class Team:
874
1175
  # Parse team response model
875
1176
  self._convert_response_to_structured_format(run_response=run_response)
876
1177
 
877
- # 3. Add the RunOutput to Team Session
1178
+ # Set the run duration
1179
+ if run_response.metrics:
1180
+ run_response.metrics.stop_timer()
1181
+
1182
+ # 6. Execute post-hooks after output is generated but before response is returned
1183
+ if self.post_hooks is not None:
1184
+ self._execute_post_hooks(
1185
+ hooks=self.post_hooks, # type: ignore
1186
+ run_output=run_response,
1187
+ session=session,
1188
+ user_id=user_id,
1189
+ debug_mode=debug_mode,
1190
+ **kwargs,
1191
+ )
1192
+
1193
+ # 7. Add the RunOutput to Team Session
878
1194
  session.upsert_run(run_response=run_response)
879
1195
 
880
- # 4. Update Team Memory
1196
+ # 8. Calculate session metrics
1197
+ self._update_session_metrics(session=session)
1198
+
1199
+ # 9. Update Team Memory
881
1200
  response_iterator = self._make_memories_and_summaries(
882
1201
  run_response=run_response,
883
1202
  run_messages=run_messages,
@@ -886,10 +1205,7 @@ class Team:
886
1205
  )
887
1206
  deque(response_iterator, maxlen=0)
888
1207
 
889
- # 5. Calculate session metrics
890
- self._update_session_metrics(session=session)
891
-
892
- # 6. Save session to storage
1208
+ # 10. Save session to storage
893
1209
  self.save_session(session=session)
894
1210
 
895
1211
  # Log Team Telemetry
@@ -905,36 +1221,110 @@ class Team:
905
1221
  def _run_stream(
906
1222
  self,
907
1223
  run_response: TeamRunOutput,
908
- run_messages: RunMessages,
909
1224
  session: TeamSession,
1225
+ session_state: Dict[str, Any],
910
1226
  user_id: Optional[str] = None,
1227
+ knowledge_filters: Optional[Dict[str, Any]] = None,
1228
+ add_history_to_context: Optional[bool] = None,
1229
+ add_dependencies_to_context: Optional[bool] = None,
1230
+ add_session_state_to_context: Optional[bool] = None,
1231
+ metadata: Optional[Dict[str, Any]] = None,
1232
+ dependencies: Optional[Dict[str, Any]] = None,
911
1233
  response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
912
1234
  stream_intermediate_steps: bool = False,
913
1235
  workflow_context: Optional[Dict] = None,
914
1236
  yield_run_response: bool = False,
1237
+ debug_mode: Optional[bool] = None,
1238
+ **kwargs: Any,
915
1239
  ) -> Iterator[Union[TeamRunOutputEvent, RunOutputEvent, TeamRunOutput]]:
916
1240
  """Run the Team and return the response iterator.
917
1241
 
918
1242
  Steps:
919
- 1. Reason about the task(s) if reasoning is enabled
920
- 2. Get a response from the model
921
- 3. Update Team Memory
922
- 4. Add RunOutput to Team Session
923
- 5. Calculate session metrics
924
- 6. Save session to storage
1243
+ 1. Execute pre-hooks
1244
+ 2. Prepare run messages
1245
+ 3. Reason about the task(s) if reasoning is enabled
1246
+ 4. Get a response from the model
1247
+ 5. Add RunOutput to Team Session
1248
+ 6. Calculate session metrics
1249
+ 7. Update Team Memory
1250
+ 8. Save session to storage
925
1251
  """
926
-
927
- log_debug(f"Team Run Start: {run_response.run_id}", center=True)
928
-
929
1252
  # Register run for cancellation tracking
930
1253
  register_run(run_response.run_id) # type: ignore
931
1254
 
1255
+ # 1. Execute pre-hooks
1256
+ run_input = cast(TeamRunInput, run_response.input)
1257
+ self.model = cast(Model, self.model)
1258
+ if self.pre_hooks is not None:
1259
+ # Can modify the run input
1260
+ pre_hook_iterator = self._execute_pre_hooks(
1261
+ hooks=self.pre_hooks, # type: ignore
1262
+ run_response=run_response,
1263
+ run_input=run_input,
1264
+ session=session,
1265
+ user_id=user_id,
1266
+ debug_mode=debug_mode,
1267
+ **kwargs,
1268
+ )
1269
+ for pre_hook_event in pre_hook_iterator:
1270
+ yield pre_hook_event
1271
+
1272
+ # Initialize team run context
1273
+ team_run_context: Dict[str, Any] = {}
1274
+
1275
+ self.determine_tools_for_model(
1276
+ model=self.model,
1277
+ run_response=run_response,
1278
+ team_run_context=team_run_context,
1279
+ session=session,
1280
+ session_state=session_state,
1281
+ user_id=user_id,
1282
+ async_mode=False,
1283
+ knowledge_filters=knowledge_filters,
1284
+ input_message=run_input.input_content,
1285
+ images=run_input.images,
1286
+ videos=run_input.videos,
1287
+ audio=run_input.audios,
1288
+ files=run_input.files,
1289
+ workflow_context=workflow_context,
1290
+ debug_mode=debug_mode,
1291
+ add_history_to_context=add_history_to_context,
1292
+ add_session_state_to_context=add_session_state_to_context,
1293
+ dependencies=dependencies,
1294
+ add_dependencies_to_context=add_dependencies_to_context,
1295
+ metadata=metadata,
1296
+ )
1297
+
1298
+ # 2. Prepare run messages
1299
+ run_messages: RunMessages = self._get_run_messages(
1300
+ run_response=run_response,
1301
+ session=session,
1302
+ session_state=session_state,
1303
+ user_id=user_id,
1304
+ input_message=run_input.input_content,
1305
+ audio=run_input.audios,
1306
+ images=run_input.images,
1307
+ videos=run_input.videos,
1308
+ files=run_input.files,
1309
+ knowledge_filters=knowledge_filters,
1310
+ add_history_to_context=add_history_to_context,
1311
+ dependencies=dependencies,
1312
+ add_dependencies_to_context=add_dependencies_to_context,
1313
+ add_session_state_to_context=add_session_state_to_context,
1314
+ metadata=metadata,
1315
+ **kwargs,
1316
+ )
1317
+ if len(run_messages.messages) == 0:
1318
+ log_error("No messages to be sent to the model.")
1319
+
1320
+ log_debug(f"Team Run Start: {run_response.run_id}", center=True)
1321
+
932
1322
  try:
933
1323
  # Start the Run by yielding a RunStarted event
934
1324
  if stream_intermediate_steps:
935
1325
  yield self._handle_event(create_team_run_started_event(run_response), run_response, workflow_context)
936
1326
 
937
- # 1. Reason about the task(s) if reasoning is enabled
1327
+ # 3. Reason about the task(s) if reasoning is enabled
938
1328
  yield from self._handle_reasoning_stream(
939
1329
  run_response=run_response,
940
1330
  run_messages=run_messages,
@@ -943,7 +1333,7 @@ class Team:
943
1333
  # Check for cancellation before model processing
944
1334
  raise_if_cancelled(run_response.run_id) # type: ignore
945
1335
 
946
- # 2. Get a response from the model
1336
+ # 4. Get a response from the model
947
1337
  if self.output_model is None:
948
1338
  for event in self._handle_model_response_stream(
949
1339
  session=session,
@@ -996,18 +1386,16 @@ class Team:
996
1386
 
997
1387
  run_response.status = RunStatus.completed
998
1388
 
999
- # 3. Add the run to Team Session
1000
- session.upsert_run(run_response=run_response)
1389
+ # Set the run duration
1390
+ if run_response.metrics:
1391
+ run_response.metrics.stop_timer()
1001
1392
 
1002
- # 4. Update Team Memory
1003
- yield from self._make_memories_and_summaries(
1004
- run_response=run_response,
1005
- run_messages=run_messages,
1006
- session=session,
1007
- user_id=user_id,
1008
- )
1393
+ # TODO: For now we don't run post-hooks during streaming
1009
1394
 
1010
- # 5. Calculate session metrics
1395
+ # 5. Add the run to Team Session
1396
+ session.upsert_run(run_response=run_response)
1397
+
1398
+ # 6. Calculate session metrics
1011
1399
  self._update_session_metrics(session=session)
1012
1400
 
1013
1401
  completed_event = self._handle_event(
@@ -1018,7 +1406,15 @@ class Team:
1018
1406
  workflow_context,
1019
1407
  )
1020
1408
 
1021
- # 6. Save session to storage
1409
+ # 7. Update Team Memory
1410
+ yield from self._make_memories_and_summaries(
1411
+ run_response=run_response,
1412
+ run_messages=run_messages,
1413
+ session=session,
1414
+ user_id=user_id,
1415
+ )
1416
+
1417
+ # 8. Save session to storage
1022
1418
  self.save_session(session=session)
1023
1419
 
1024
1420
  if stream_intermediate_steps:
@@ -1129,11 +1525,19 @@ class Team:
1129
1525
  ) -> Union[TeamRunOutput, Iterator[Union[RunOutputEvent, TeamRunOutputEvent]]]:
1130
1526
  """Run the Team and return the response."""
1131
1527
 
1528
+ # Create a run_id for this specific run
1529
+ run_id = str(uuid4())
1530
+
1132
1531
  # Validate input against input_schema if provided
1133
1532
  validated_input = self._validate_input(input)
1134
1533
 
1135
- # Create a run_id for this specific run
1136
- run_id = str(uuid4())
1534
+ # Normalise hooks & guardrails
1535
+ if not self._hooks_normalised:
1536
+ if self.pre_hooks:
1537
+ self.pre_hooks = normalize_hooks(self.pre_hooks)
1538
+ if self.post_hooks:
1539
+ self.post_hooks = normalize_hooks(self.post_hooks)
1540
+ self._hooks_normalised = True
1137
1541
 
1138
1542
  session_id, user_id, session_state = self._initialize_session(
1139
1543
  run_id=run_id, session_id=session_id, user_id=user_id, session_state=session_state
@@ -1148,7 +1552,7 @@ class Team:
1148
1552
 
1149
1553
  # Create RunInput to capture the original user input
1150
1554
  run_input = TeamRunInput(
1151
- input_content=input,
1555
+ input_content=validated_input,
1152
1556
  images=image_artifacts,
1153
1557
  videos=video_artifacts,
1154
1558
  audios=audio_artifacts,
@@ -1231,31 +1635,9 @@ class Team:
1231
1635
  run_response.model = self.model.id if self.model is not None else None
1232
1636
  run_response.model_provider = self.model.provider if self.model is not None else None
1233
1637
 
1234
- # Initialize team run context
1235
- team_run_context: Dict[str, Any] = {}
1236
-
1237
- self.determine_tools_for_model(
1238
- model=self.model,
1239
- run_response=run_response,
1240
- team_run_context=team_run_context,
1241
- session=team_session,
1242
- session_state=session_state,
1243
- user_id=user_id,
1244
- async_mode=False,
1245
- knowledge_filters=effective_filters,
1246
- input_message=input,
1247
- images=images,
1248
- videos=videos,
1249
- audio=audio,
1250
- files=files,
1251
- workflow_context=workflow_context,
1252
- debug_mode=debug_mode,
1253
- add_history_to_context=add_history,
1254
- add_session_state_to_context=add_session_state,
1255
- dependencies=run_dependencies,
1256
- add_dependencies_to_context=add_dependencies,
1257
- metadata=metadata,
1258
- )
1638
+ # Start the run metrics timer, to calculate the run duration
1639
+ run_response.metrics = Metrics()
1640
+ run_response.metrics.start_timer()
1259
1641
 
1260
1642
  # If no retries are set, use the team's default retries
1261
1643
  retries = retries if retries is not None else self.retries
@@ -1269,48 +1651,47 @@ class Team:
1269
1651
 
1270
1652
  # Run the team
1271
1653
  try:
1272
- run_messages = self._get_run_messages(
1273
- run_response=run_response,
1274
- session=team_session,
1275
- session_state=session_state,
1276
- user_id=user_id,
1277
- input_message=validated_input,
1278
- audio=audio,
1279
- images=images,
1280
- videos=videos,
1281
- files=files,
1282
- knowledge_filters=effective_filters,
1283
- add_history_to_context=add_history,
1284
- dependencies=run_dependencies,
1285
- add_dependencies_to_context=add_dependencies,
1286
- add_session_state_to_context=add_session_state,
1287
- **kwargs,
1288
- )
1289
- if len(run_messages.messages) == 0:
1290
- log_error("No messages to be sent to the model.")
1291
-
1292
1654
  if stream:
1293
1655
  response_iterator = self._run_stream(
1294
1656
  run_response=run_response,
1295
- run_messages=run_messages,
1296
1657
  session=team_session,
1658
+ session_state=session_state,
1297
1659
  user_id=user_id,
1660
+ knowledge_filters=effective_filters,
1661
+ add_history_to_context=add_history,
1662
+ add_dependencies_to_context=add_dependencies,
1663
+ add_session_state_to_context=add_session_state,
1664
+ metadata=metadata,
1665
+ dependencies=run_dependencies,
1298
1666
  response_format=response_format,
1299
1667
  stream_intermediate_steps=stream_intermediate_steps,
1300
1668
  workflow_context=workflow_context,
1301
1669
  yield_run_response=yield_run_response,
1670
+ debug_mode=debug_mode,
1671
+ **kwargs,
1302
1672
  )
1303
1673
 
1304
1674
  return response_iterator # type: ignore
1305
1675
  else:
1306
1676
  return self._run(
1307
1677
  run_response=run_response,
1308
- run_messages=run_messages,
1309
1678
  session=team_session,
1679
+ session_state=session_state,
1310
1680
  user_id=user_id,
1681
+ knowledge_filters=effective_filters,
1682
+ add_history_to_context=add_history,
1683
+ add_dependencies_to_context=add_dependencies,
1684
+ add_session_state_to_context=add_session_state,
1685
+ metadata=metadata,
1686
+ dependencies=run_dependencies,
1311
1687
  response_format=response_format,
1688
+ debug_mode=debug_mode,
1689
+ **kwargs,
1312
1690
  )
1313
1691
 
1692
+ except (InputCheckError, OutputCheckError) as e:
1693
+ log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
1694
+ raise e
1314
1695
  except ModelProviderError as e:
1315
1696
  import time
1316
1697
 
@@ -1365,14 +1746,9 @@ class Team:
1365
1746
  async def _arun(
1366
1747
  self,
1367
1748
  run_response: TeamRunOutput,
1368
- input_message: Union[str, List, Dict, Message, BaseModel, List[Message]],
1369
1749
  session: TeamSession,
1370
- session_state: Optional[Dict[str, Any]] = None,
1750
+ session_state: Dict[str, Any],
1371
1751
  user_id: Optional[str] = None,
1372
- images: Optional[Sequence[Image]] = None,
1373
- videos: Optional[Sequence[Video]] = None,
1374
- audio: Optional[Sequence[Audio]] = None,
1375
- files: Optional[Sequence[File]] = None,
1376
1752
  knowledge_filters: Optional[Dict[str, Any]] = None,
1377
1753
  add_history_to_context: Optional[bool] = None,
1378
1754
  add_dependencies_to_context: Optional[bool] = None,
@@ -1380,35 +1756,82 @@ class Team:
1380
1756
  metadata: Optional[Dict[str, Any]] = None,
1381
1757
  response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
1382
1758
  dependencies: Optional[Dict[str, Any]] = None,
1759
+ workflow_context: Optional[Dict] = None,
1760
+ debug_mode: Optional[bool] = None,
1383
1761
  **kwargs: Any,
1384
1762
  ) -> TeamRunOutput:
1385
1763
  """Run the Team and return the response.
1386
1764
 
1387
1765
  Steps:
1388
1766
  1. Resolve dependencies
1389
- 2. Prepare run messages
1390
- 3. Reason about the task(s) if reasoning is enabled
1391
- 4. Get a response from the model
1392
- 5. Update Team Memory
1767
+ 2. Execute pre-hooks
1768
+ 3. Prepare run messages
1769
+ 4. Reason about the task(s) if reasoning is enabled
1770
+ 5. Get a response from the model
1393
1771
  6. Add RunOutput to Team Session
1394
1772
  7. Calculate session metrics
1395
- 8. Save session to storage
1773
+ 8. Update Team Memory
1774
+ 9. Save session to storage
1396
1775
  """
1397
1776
  # 1. Resolve callable dependencies if present
1398
1777
  if dependencies is not None:
1399
1778
  await self._aresolve_run_dependencies(dependencies=dependencies)
1400
1779
 
1401
- # 2. Prepare run messages
1780
+ run_input = cast(TeamRunInput, run_response.input)
1781
+ self.model = cast(Model, self.model)
1782
+ # 2. Execute pre-hooks after session is loaded but before processing starts
1783
+ if self.pre_hooks is not None:
1784
+ pre_hook_iterator = self._aexecute_pre_hooks(
1785
+ hooks=self.pre_hooks, # type: ignore
1786
+ run_response=run_response,
1787
+ run_input=run_input,
1788
+ session=session,
1789
+ user_id=user_id,
1790
+ debug_mode=debug_mode,
1791
+ **kwargs,
1792
+ )
1793
+
1794
+ # Consume the async iterator without yielding
1795
+ async for _ in pre_hook_iterator:
1796
+ pass
1797
+
1798
+ # Initialize the team run context
1799
+ team_run_context: Dict[str, Any] = {}
1800
+
1801
+ self.determine_tools_for_model(
1802
+ model=self.model,
1803
+ run_response=run_response,
1804
+ team_run_context=team_run_context,
1805
+ session=session,
1806
+ session_state=session_state,
1807
+ user_id=user_id,
1808
+ async_mode=True,
1809
+ knowledge_filters=knowledge_filters,
1810
+ input_message=run_input.input_content,
1811
+ images=run_input.images,
1812
+ videos=run_input.videos,
1813
+ audio=run_input.audios,
1814
+ files=run_input.files,
1815
+ workflow_context=workflow_context,
1816
+ debug_mode=debug_mode,
1817
+ add_history_to_context=add_history_to_context,
1818
+ add_dependencies_to_context=add_dependencies_to_context,
1819
+ add_session_state_to_context=add_session_state_to_context,
1820
+ dependencies=dependencies,
1821
+ metadata=metadata,
1822
+ )
1823
+
1824
+ # 3. Prepare run messages
1402
1825
  run_messages = self._get_run_messages(
1403
1826
  run_response=run_response,
1404
1827
  session=session,
1405
1828
  session_state=session_state,
1406
1829
  user_id=user_id,
1407
- input_message=input_message,
1408
- audio=audio,
1409
- images=images,
1410
- videos=videos,
1411
- files=files,
1830
+ input_message=run_input.input_content,
1831
+ audio=run_input.audios,
1832
+ images=run_input.images,
1833
+ videos=run_input.videos,
1834
+ files=run_input.files,
1412
1835
  knowledge_filters=knowledge_filters,
1413
1836
  add_history_to_context=add_history_to_context,
1414
1837
  dependencies=dependencies,
@@ -1424,13 +1847,13 @@ class Team:
1424
1847
  # Register run for cancellation tracking
1425
1848
  register_run(run_response.run_id) # type: ignore
1426
1849
 
1427
- # 3. Reason about the task(s) if reasoning is enabled
1850
+ # 4. Reason about the task(s) if reasoning is enabled
1428
1851
  await self._ahandle_reasoning(run_response=run_response, run_messages=run_messages)
1429
1852
 
1430
1853
  # Check for cancellation before model call
1431
1854
  raise_if_cancelled(run_response.run_id) # type: ignore
1432
1855
 
1433
- # 4. Get the model response for the team leader
1856
+ # 5. Get the model response for the team leader
1434
1857
  model_response = await self.model.aresponse(
1435
1858
  messages=run_messages.messages,
1436
1859
  tools=self._tools_for_model,
@@ -1463,7 +1886,11 @@ class Team:
1463
1886
  # Parse team response model
1464
1887
  self._convert_response_to_structured_format(run_response=run_response)
1465
1888
 
1466
- # 5. Add the run to memory
1889
+ # Set the run duration
1890
+ if run_response.metrics:
1891
+ run_response.metrics.stop_timer()
1892
+
1893
+ # 6. Add the run to session
1467
1894
  session.upsert_run(run_response=run_response)
1468
1895
 
1469
1896
  # 6. Update Team Memory
@@ -1484,6 +1911,17 @@ class Team:
1484
1911
  # Log Team Telemetry
1485
1912
  await self._alog_team_telemetry(session_id=session.session_id, run_id=run_response.run_id)
1486
1913
 
1914
+ # Execute post-hooks after output is generated but before response is returned
1915
+ if self.post_hooks is not None:
1916
+ await self._aexecute_post_hooks(
1917
+ hooks=self.post_hooks, # type: ignore
1918
+ run_output=run_response,
1919
+ session=session,
1920
+ user_id=user_id,
1921
+ debug_mode=debug_mode,
1922
+ **kwargs,
1923
+ )
1924
+
1487
1925
  log_debug(f"Team Run End: {run_response.run_id}", center=True, symbol="*")
1488
1926
 
1489
1927
  # Always clean up the run tracking
@@ -1494,14 +1932,9 @@ class Team:
1494
1932
  async def _arun_stream(
1495
1933
  self,
1496
1934
  run_response: TeamRunOutput,
1497
- input_message: Union[str, List, Dict, Message, BaseModel, List[Message]],
1498
1935
  session: TeamSession,
1499
- session_state: Optional[Dict[str, Any]] = None,
1936
+ session_state: Dict[str, Any],
1500
1937
  user_id: Optional[str] = None,
1501
- images: Optional[Sequence[Image]] = None,
1502
- videos: Optional[Sequence[Video]] = None,
1503
- audio: Optional[Sequence[Audio]] = None,
1504
- files: Optional[Sequence[File]] = None,
1505
1938
  knowledge_filters: Optional[Dict[str, Any]] = None,
1506
1939
  add_history_to_context: Optional[bool] = None,
1507
1940
  add_dependencies_to_context: Optional[bool] = None,
@@ -1512,6 +1945,7 @@ class Team:
1512
1945
  stream_intermediate_steps: bool = False,
1513
1946
  workflow_context: Optional[Dict] = None,
1514
1947
  yield_run_response: bool = False,
1948
+ debug_mode: Optional[bool] = None,
1515
1949
  **kwargs: Any,
1516
1950
  ) -> AsyncIterator[Union[TeamRunOutputEvent, RunOutputEvent, TeamRunOutput]]:
1517
1951
  """Run the Team and return the response.
@@ -1531,17 +1965,59 @@ class Team:
1531
1965
  if dependencies is not None:
1532
1966
  await self._aresolve_run_dependencies(dependencies=dependencies)
1533
1967
 
1968
+ # Execute pre-hooks
1969
+ run_input = cast(TeamRunInput, run_response.input)
1970
+ self.model = cast(Model, self.model)
1971
+ if self.pre_hooks is not None:
1972
+ pre_hook_iterator = self._aexecute_pre_hooks(
1973
+ hooks=self.pre_hooks, # type: ignore
1974
+ run_response=run_response,
1975
+ run_input=run_input,
1976
+ session=session,
1977
+ user_id=user_id,
1978
+ debug_mode=debug_mode,
1979
+ **kwargs,
1980
+ )
1981
+ async for pre_hook_event in pre_hook_iterator:
1982
+ yield pre_hook_event
1983
+
1984
+ # Initialize the team run context
1985
+ team_run_context: Dict[str, Any] = {}
1986
+
1987
+ self.determine_tools_for_model(
1988
+ model=self.model,
1989
+ run_response=run_response,
1990
+ team_run_context=team_run_context,
1991
+ session=session,
1992
+ session_state=session_state,
1993
+ user_id=user_id,
1994
+ async_mode=True,
1995
+ knowledge_filters=knowledge_filters,
1996
+ input_message=run_input.input_content,
1997
+ images=run_input.images,
1998
+ videos=run_input.videos,
1999
+ audio=run_input.audios,
2000
+ files=run_input.files,
2001
+ workflow_context=workflow_context,
2002
+ debug_mode=debug_mode,
2003
+ add_history_to_context=add_history_to_context,
2004
+ add_dependencies_to_context=add_dependencies_to_context,
2005
+ add_session_state_to_context=add_session_state_to_context,
2006
+ dependencies=dependencies,
2007
+ metadata=metadata,
2008
+ )
2009
+
1534
2010
  # 2. Prepare run messages
1535
2011
  run_messages = self._get_run_messages(
1536
2012
  run_response=run_response,
1537
2013
  session=session,
1538
2014
  session_state=session_state,
1539
2015
  user_id=user_id,
1540
- input_message=input_message,
1541
- audio=audio,
1542
- images=images,
1543
- videos=videos,
1544
- files=files,
2016
+ input_message=run_input.input_content,
2017
+ audio=run_input.audios,
2018
+ images=run_input.images,
2019
+ videos=run_input.videos,
2020
+ files=run_input.files,
1545
2021
  knowledge_filters=knowledge_filters,
1546
2022
  add_history_to_context=add_history_to_context,
1547
2023
  dependencies=dependencies,
@@ -1552,6 +2028,7 @@ class Team:
1552
2028
  )
1553
2029
 
1554
2030
  log_debug(f"Team Run Start: {run_response.run_id}", center=True)
2031
+
1555
2032
  # Register run for cancellation tracking
1556
2033
  register_run(run_response.run_id) # type: ignore
1557
2034
 
@@ -1624,6 +2101,10 @@ class Team:
1624
2101
 
1625
2102
  run_response.status = RunStatus.completed
1626
2103
 
2104
+ # Set the run duration
2105
+ if run_response.metrics:
2106
+ run_response.metrics.stop_timer()
2107
+
1627
2108
  # 5. Add the run to Team Session
1628
2109
  session.upsert_run(run_response=run_response)
1629
2110
 
@@ -1754,11 +2235,19 @@ class Team:
1754
2235
  ) -> Union[TeamRunOutput, AsyncIterator[Union[RunOutputEvent, TeamRunOutputEvent]]]:
1755
2236
  """Run the Team asynchronously and return the response."""
1756
2237
 
2238
+ # Create a run_id for this specific run
2239
+ run_id = str(uuid4())
2240
+
1757
2241
  # Validate input against input_schema if provided
1758
2242
  validated_input = self._validate_input(input)
1759
2243
 
1760
- # Create a run_id for this specific run
1761
- run_id = str(uuid4())
2244
+ # Normalise hook & guardails
2245
+ if not self._hooks_normalised:
2246
+ if self.pre_hooks:
2247
+ self.pre_hooks = normalize_hooks(self.pre_hooks, async_mode=True)
2248
+ if self.post_hooks:
2249
+ self.post_hooks = normalize_hooks(self.post_hooks, async_mode=True)
2250
+ self._hooks_normalised = True
1762
2251
 
1763
2252
  session_id, user_id, session_state = self._initialize_session(
1764
2253
  run_id=run_id, session_id=session_id, user_id=user_id, session_state=session_state
@@ -1773,7 +2262,7 @@ class Team:
1773
2262
 
1774
2263
  # Create RunInput to capture the original user input
1775
2264
  run_input = TeamRunInput(
1776
- input_content=input,
2265
+ input_content=validated_input,
1777
2266
  images=image_artifacts,
1778
2267
  videos=video_artifacts,
1779
2268
  audios=audio_artifacts,
@@ -1804,7 +2293,6 @@ class Team:
1804
2293
  workflow_context = kwargs.pop("workflow_context", None)
1805
2294
 
1806
2295
  effective_filters = knowledge_filters
1807
-
1808
2296
  # When filters are passed manually
1809
2297
  if self.knowledge_filters or knowledge_filters:
1810
2298
  effective_filters = self._get_effective_filters(knowledge_filters)
@@ -1851,31 +2339,9 @@ class Team:
1851
2339
  run_response.model = self.model.id if self.model is not None else None
1852
2340
  run_response.model_provider = self.model.provider if self.model is not None else None
1853
2341
 
1854
- # Initialize the team run context
1855
- team_run_context: Dict[str, Any] = {}
1856
-
1857
- self.determine_tools_for_model(
1858
- model=self.model,
1859
- run_response=run_response,
1860
- team_run_context=team_run_context,
1861
- session=team_session, # type: ignore
1862
- session_state=session_state,
1863
- user_id=user_id,
1864
- async_mode=True,
1865
- knowledge_filters=effective_filters,
1866
- input_message=input,
1867
- images=images,
1868
- videos=videos,
1869
- audio=audio,
1870
- files=files,
1871
- workflow_context=workflow_context,
1872
- debug_mode=debug_mode,
1873
- add_history_to_context=add_history_to_context,
1874
- add_dependencies_to_context=add_dependencies_to_context,
1875
- add_session_state_to_context=add_session_state_to_context,
1876
- dependencies=dependencies,
1877
- metadata=metadata,
1878
- )
2342
+ # Start the run metrics timer, to calculate the run duration
2343
+ run_response.metrics = Metrics()
2344
+ run_response.metrics.start_timer()
1879
2345
 
1880
2346
  # If no retries are set, use the team's default retries
1881
2347
  retries = retries if retries is not None else self.retries
@@ -1890,14 +2356,9 @@ class Team:
1890
2356
  if stream:
1891
2357
  response_iterator = self._arun_stream(
1892
2358
  run_response=run_response,
1893
- input_message=validated_input,
1894
2359
  session=team_session, # type: ignore
1895
2360
  session_state=session_state,
1896
2361
  user_id=user_id,
1897
- audio=audio,
1898
- images=images,
1899
- videos=videos,
1900
- files=files,
1901
2362
  knowledge_filters=effective_filters,
1902
2363
  add_history_to_context=add_history,
1903
2364
  add_dependencies_to_context=add_dependencies,
@@ -1908,20 +2369,16 @@ class Team:
1908
2369
  stream_intermediate_steps=stream_intermediate_steps,
1909
2370
  workflow_context=workflow_context,
1910
2371
  yield_run_response=yield_run_response,
2372
+ debug_mode=debug_mode,
1911
2373
  **kwargs,
1912
2374
  )
1913
2375
  return response_iterator # type: ignore
1914
2376
  else:
1915
2377
  return self._arun( # type: ignore
1916
2378
  run_response=run_response,
1917
- input_message=validated_input,
1918
2379
  session=team_session, # type: ignore
1919
2380
  user_id=user_id,
1920
2381
  session_state=session_state,
1921
- audio=audio,
1922
- images=images,
1923
- videos=videos,
1924
- files=files,
1925
2382
  knowledge_filters=effective_filters,
1926
2383
  add_history_to_context=add_history,
1927
2384
  add_dependencies_to_context=add_dependencies,
@@ -1929,9 +2386,14 @@ class Team:
1929
2386
  metadata=metadata,
1930
2387
  response_format=response_format,
1931
2388
  dependencies=run_dependencies,
2389
+ workflow_context=workflow_context,
2390
+ debug_mode=debug_mode,
1932
2391
  **kwargs,
1933
2392
  )
1934
2393
 
2394
+ except (InputCheckError, OutputCheckError) as e:
2395
+ log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
2396
+ raise e
1935
2397
  except ModelProviderError as e:
1936
2398
  log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}")
1937
2399
  last_exception = e
@@ -2050,11 +2512,12 @@ class Team:
2050
2512
  # Update the TeamRunOutput metrics
2051
2513
  run_response.metrics = self._calculate_metrics(messages_for_run_response)
2052
2514
 
2053
- for tool_call in model_response.tool_calls:
2054
- tool_name = tool_call.get("tool_name", "")
2055
- if tool_name.lower() in ["think", "analyze"]:
2056
- tool_args = tool_call.get("tool_args", {})
2057
- self._update_reasoning_content_from_tool_call(run_response, tool_name, tool_args)
2515
+ if model_response.tool_executions:
2516
+ for tool_call in model_response.tool_executions:
2517
+ tool_name = tool_call.tool_name
2518
+ if tool_name and tool_name.lower() in ["think", "analyze"]:
2519
+ tool_args = tool_call.tool_args or {}
2520
+ self._update_reasoning_content_from_tool_call(run_response, tool_name, tool_args)
2058
2521
 
2059
2522
  def _handle_model_response_stream(
2060
2523
  self,
@@ -4079,6 +4542,13 @@ class Team:
4079
4542
  if self.enable_agentic_state:
4080
4543
  _tools.append(self.update_session_state)
4081
4544
 
4545
+ if self.search_session_history:
4546
+ _tools.append(
4547
+ self._get_previous_sessions_messages_function(
4548
+ num_history_sessions=self.num_history_sessions, user_id=user_id
4549
+ )
4550
+ )
4551
+
4082
4552
  if self.knowledge is not None or self.knowledge_retriever is not None:
4083
4553
  # Check if knowledge retriever is an async function but used in sync mode
4084
4554
  from inspect import iscoroutinefunction
@@ -4681,7 +5151,7 @@ class Team:
4681
5151
  history = session.get_messages_from_last_n_runs(
4682
5152
  last_n=self.num_history_runs,
4683
5153
  skip_role=skip_role,
4684
- team_id=self.id,
5154
+ team_id=self.id if self.parent_team_id is not None else None,
4685
5155
  )
4686
5156
 
4687
5157
  if len(history) > 0:
@@ -5196,10 +5666,76 @@ class Team:
5196
5666
 
5197
5667
  return f"Updated session state: {session_state}"
5198
5668
 
5669
+ def _get_previous_sessions_messages_function(
5670
+ self, num_history_sessions: Optional[int] = 2, user_id: Optional[str] = None
5671
+ ) -> Callable:
5672
+ """Factory function to create a get_previous_session_messages function.
5673
+
5674
+ Args:
5675
+ num_history_sessions: The last n sessions to be taken from db
5676
+ user_id: The user ID to filter sessions by
5677
+
5678
+ Returns:
5679
+ Callable: A function that retrieves messages from previous sessions
5680
+ """
5681
+
5682
+ def get_previous_session_messages() -> str:
5683
+ """Use this function to retrieve messages from previous chat sessions.
5684
+ USE THIS TOOL ONLY WHEN THE QUESTION IS EITHER "What was my last conversation?" or "What was my last question?" and similar to it.
5685
+
5686
+ Returns:
5687
+ str: JSON formatted list of message pairs from previous sessions
5688
+ """
5689
+ import json
5690
+
5691
+ if self.db is None:
5692
+ return "Previous session messages not available"
5693
+
5694
+ selected_sessions = self.db.get_sessions(
5695
+ session_type=SessionType.TEAM,
5696
+ limit=num_history_sessions,
5697
+ user_id=user_id,
5698
+ sort_by="created_at",
5699
+ sort_order="desc",
5700
+ )
5701
+
5702
+ all_messages = []
5703
+ seen_message_pairs = set()
5704
+
5705
+ for session in selected_sessions:
5706
+ if isinstance(session, TeamSession) and session.runs:
5707
+ message_count = 0
5708
+ for run in session.runs:
5709
+ messages = run.messages
5710
+ if messages is not None:
5711
+ for i in range(0, len(messages) - 1, 2):
5712
+ if i + 1 < len(messages):
5713
+ try:
5714
+ user_msg = messages[i]
5715
+ assistant_msg = messages[i + 1]
5716
+ user_content = user_msg.content
5717
+ assistant_content = assistant_msg.content
5718
+ if user_content is None or assistant_content is None:
5719
+ continue # Skip this pair if either message has no content
5720
+
5721
+ msg_pair_id = f"{user_content}:{assistant_content}"
5722
+ if msg_pair_id not in seen_message_pairs:
5723
+ seen_message_pairs.add(msg_pair_id)
5724
+ all_messages.append(Message.model_validate(user_msg))
5725
+ all_messages.append(Message.model_validate(assistant_msg))
5726
+ message_count += 1
5727
+ except Exception as e:
5728
+ log_warning(f"Error processing message pair: {e}")
5729
+ continue
5730
+
5731
+ return json.dumps([msg.to_dict() for msg in all_messages]) if all_messages else "No history found"
5732
+
5733
+ return get_previous_session_messages
5734
+
5199
5735
  def _get_history_for_member_agent(self, session: TeamSession, member_agent: Union[Agent, "Team"]) -> List[Message]:
5200
5736
  from copy import deepcopy
5201
5737
 
5202
- log_info(f"Adding messages from history for {member_agent.name}")
5738
+ log_debug(f"Adding messages from history for {member_agent.name}")
5203
5739
 
5204
5740
  member_agent_id = member_agent.id if isinstance(member_agent, Agent) else None
5205
5741
  member_team_id = member_agent.id if isinstance(member_agent, Team) else None
@@ -5464,6 +6000,9 @@ class Team:
5464
6000
  check_if_run_cancelled(member_agent_run_output_event)
5465
6001
 
5466
6002
  # Yield the member event directly
6003
+ member_agent_run_output_event.parent_run_id = (
6004
+ member_agent_run_output_event.parent_run_id or run_response.run_id
6005
+ )
5467
6006
  yield member_agent_run_output_event
5468
6007
  else:
5469
6008
  member_agent_run_response = member_agent.run( # type: ignore
@@ -5589,6 +6128,9 @@ class Team:
5589
6128
  check_if_run_cancelled(member_agent_run_response_event)
5590
6129
 
5591
6130
  # Yield the member event directly
6131
+ member_agent_run_response_event.parent_run_id = (
6132
+ member_agent_run_response_event.parent_run_id or run_response.run_id
6133
+ )
5592
6134
  yield member_agent_run_response_event
5593
6135
  else:
5594
6136
  member_agent_run_response = await member_agent.arun( # type: ignore
@@ -5704,6 +6246,9 @@ class Team:
5704
6246
  check_if_run_cancelled(member_agent_run_response_chunk)
5705
6247
 
5706
6248
  # Yield the member event directly
6249
+ member_agent_run_response_chunk.parent_run_id = (
6250
+ member_agent_run_response_chunk.parent_run_id or run_response.run_id
6251
+ )
5707
6252
  yield member_agent_run_response_chunk
5708
6253
 
5709
6254
  else:
@@ -5815,6 +6360,9 @@ class Team:
5815
6360
  member_agent_run_response = member_agent_run_output_event # type: ignore
5816
6361
  break
5817
6362
  check_if_run_cancelled(member_agent_run_output_event)
6363
+ member_agent_run_output_event.parent_run_id = (
6364
+ member_agent_run_output_event.parent_run_id or run_response.run_id
6365
+ )
5818
6366
  await queue.put(member_agent_run_output_event)
5819
6367
  finally:
5820
6368
  _process_delegate_task_to_member(
@@ -6268,6 +6816,7 @@ class Team:
6268
6816
 
6269
6817
  if session is None:
6270
6818
  raise Exception("Session not found")
6819
+
6271
6820
  return session.get_chat_history()
6272
6821
 
6273
6822
  def get_messages_for_session(self, session_id: Optional[str] = None) -> List[Message]:
@@ -6296,6 +6845,9 @@ class Team:
6296
6845
 
6297
6846
  session = self.get_session(session_id=session_id)
6298
6847
 
6848
+ if session is None:
6849
+ raise Exception(f"Session {session_id} not found")
6850
+
6299
6851
  return session.get_session_summary() # type: ignore
6300
6852
 
6301
6853
  def get_user_memories(self, user_id: Optional[str] = None) -> Optional[List[UserMemory]]: