agno 2.0.10__py3-none-any.whl → 2.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. agno/agent/agent.py +608 -175
  2. agno/db/in_memory/in_memory_db.py +42 -29
  3. agno/db/postgres/postgres.py +6 -4
  4. agno/exceptions.py +62 -1
  5. agno/guardrails/__init__.py +6 -0
  6. agno/guardrails/base.py +19 -0
  7. agno/guardrails/openai.py +144 -0
  8. agno/guardrails/pii.py +94 -0
  9. agno/guardrails/prompt_injection.py +51 -0
  10. agno/knowledge/embedder/aws_bedrock.py +9 -4
  11. agno/knowledge/embedder/azure_openai.py +54 -0
  12. agno/knowledge/embedder/base.py +2 -0
  13. agno/knowledge/embedder/cohere.py +184 -5
  14. agno/knowledge/embedder/google.py +79 -1
  15. agno/knowledge/embedder/huggingface.py +9 -4
  16. agno/knowledge/embedder/jina.py +63 -0
  17. agno/knowledge/embedder/mistral.py +78 -11
  18. agno/knowledge/embedder/ollama.py +5 -0
  19. agno/knowledge/embedder/openai.py +18 -54
  20. agno/knowledge/embedder/voyageai.py +69 -16
  21. agno/knowledge/knowledge.py +5 -4
  22. agno/knowledge/reader/pdf_reader.py +4 -3
  23. agno/knowledge/reader/website_reader.py +3 -2
  24. agno/models/base.py +125 -32
  25. agno/models/cerebras/cerebras.py +1 -0
  26. agno/models/cerebras/cerebras_openai.py +1 -0
  27. agno/models/dashscope/dashscope.py +1 -0
  28. agno/models/google/gemini.py +27 -5
  29. agno/models/litellm/chat.py +17 -0
  30. agno/models/openai/chat.py +13 -4
  31. agno/models/perplexity/perplexity.py +2 -3
  32. agno/models/requesty/__init__.py +5 -0
  33. agno/models/requesty/requesty.py +49 -0
  34. agno/models/vllm/vllm.py +1 -0
  35. agno/models/xai/xai.py +1 -0
  36. agno/os/app.py +167 -148
  37. agno/os/interfaces/whatsapp/router.py +2 -0
  38. agno/os/mcp.py +1 -1
  39. agno/os/middleware/__init__.py +7 -0
  40. agno/os/middleware/jwt.py +233 -0
  41. agno/os/router.py +181 -45
  42. agno/os/routers/home.py +2 -2
  43. agno/os/routers/memory/memory.py +23 -1
  44. agno/os/routers/memory/schemas.py +1 -1
  45. agno/os/routers/session/session.py +20 -3
  46. agno/os/utils.py +172 -8
  47. agno/run/agent.py +120 -77
  48. agno/run/team.py +115 -72
  49. agno/run/workflow.py +5 -15
  50. agno/session/summary.py +9 -10
  51. agno/session/team.py +2 -1
  52. agno/team/team.py +720 -168
  53. agno/tools/firecrawl.py +4 -4
  54. agno/tools/function.py +42 -2
  55. agno/tools/knowledge.py +3 -3
  56. agno/tools/searxng.py +2 -2
  57. agno/tools/serper.py +2 -2
  58. agno/tools/spider.py +2 -2
  59. agno/tools/workflow.py +4 -5
  60. agno/utils/events.py +66 -1
  61. agno/utils/hooks.py +57 -0
  62. agno/utils/media.py +11 -9
  63. agno/utils/print_response/agent.py +43 -5
  64. agno/utils/print_response/team.py +48 -12
  65. agno/vectordb/cassandra/cassandra.py +44 -4
  66. agno/vectordb/chroma/chromadb.py +79 -8
  67. agno/vectordb/clickhouse/clickhousedb.py +43 -6
  68. agno/vectordb/couchbase/couchbase.py +76 -5
  69. agno/vectordb/lancedb/lance_db.py +38 -3
  70. agno/vectordb/llamaindex/__init__.py +3 -0
  71. agno/vectordb/milvus/milvus.py +76 -4
  72. agno/vectordb/mongodb/mongodb.py +76 -4
  73. agno/vectordb/pgvector/pgvector.py +50 -6
  74. agno/vectordb/pineconedb/pineconedb.py +39 -2
  75. agno/vectordb/qdrant/qdrant.py +76 -26
  76. agno/vectordb/singlestore/singlestore.py +77 -4
  77. agno/vectordb/upstashdb/upstashdb.py +42 -2
  78. agno/vectordb/weaviate/weaviate.py +39 -3
  79. agno/workflow/types.py +1 -0
  80. agno/workflow/workflow.py +58 -2
  81. {agno-2.0.10.dist-info → agno-2.1.0.dist-info}/METADATA +4 -3
  82. {agno-2.0.10.dist-info → agno-2.1.0.dist-info}/RECORD +85 -75
  83. {agno-2.0.10.dist-info → agno-2.1.0.dist-info}/WHEEL +0 -0
  84. {agno-2.0.10.dist-info → agno-2.1.0.dist-info}/licenses/LICENSE +0 -0
  85. {agno-2.0.10.dist-info → agno-2.1.0.dist-info}/top_level.txt +0 -0
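The headline API change in 2.1.0 is the new hook and guardrail surface visible in the file list above: Agent gains pre_hooks and post_hooks, a new agno.guardrails package ships BaseGuardrail plus OpenAI-moderation, PII and prompt-injection checks, and agno.exceptions adds InputCheckError and OutputCheckError (both carrying a check_trigger). Below is a minimal sketch of how a custom pre-hook might be wired up, based only on the signatures visible in the agent.py diff that follows; the import path for RunInput and the InputCheckError constructor arguments are assumptions, not confirmed API.

from agno.agent import Agent
from agno.exceptions import InputCheckError
from agno.run.agent import RunInput  # assumed import path

def block_account_numbers(run_input: RunInput) -> None:
    # Per the diff, hook arguments are filtered to the parameter names the callable
    # declares (run_input, agent, session, user_id, debug_mode, ...), so a hook only
    # needs to accept what it uses. Raising InputCheckError aborts the run before the
    # model is called; the single-message constructor used here is an assumption.
    if "account number" in str(run_input.input_content).lower():
        raise InputCheckError("Input rejected: possible account number in prompt.")

agent = Agent(
    name="support-agent",
    pre_hooks=[block_account_numbers],  # run right after the agent session is loaded
    post_hooks=[],                      # run after output is generated, before it is returned
)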
agno/agent/agent.py CHANGED
@@ -28,7 +28,14 @@ from uuid import uuid4
 from pydantic import BaseModel

 from agno.db.base import BaseDb, SessionType, UserMemory
-from agno.exceptions import ModelProviderError, RunCancelledException, StopAgentRun
+from agno.exceptions import (
+    InputCheckError,
+    ModelProviderError,
+    OutputCheckError,
+    RunCancelledException,
+    StopAgentRun,
+)
+from agno.guardrails import BaseGuardrail
 from agno.knowledge.knowledge import Knowledge
 from agno.knowledge.types import KnowledgeFilter
 from agno.media import Audio, File, Image, Video
@@ -64,6 +71,8 @@ from agno.utils.events import (
     create_memory_update_started_event,
     create_parser_model_response_completed_event,
     create_parser_model_response_started_event,
+    create_pre_hook_completed_event,
+    create_pre_hook_started_event,
     create_reasoning_completed_event,
     create_reasoning_started_event,
     create_reasoning_step_event,
@@ -77,6 +86,7 @@ from agno.utils.events import (
     create_tool_call_completed_event,
     create_tool_call_started_event,
 )
+from agno.utils.hooks import filter_hook_args, normalize_hooks
 from agno.utils.knowledge import get_agentic_or_user_search_filters
 from agno.utils.log import (
     log_debug,
@@ -209,6 +219,12 @@ class Agent:
     # A function that acts as middleware and is called around tool calls.
     tool_hooks: Optional[List[Callable]] = None

+    # --- Agent Hooks ---
+    # Functions called right after agent-session is loaded, before processing starts
+    pre_hooks: Optional[Union[List[Callable[..., Any]], List[BaseGuardrail]]] = None
+    # Functions called after output is generated but before the response is returned
+    post_hooks: Optional[Union[List[Callable[..., Any]], List[BaseGuardrail]]] = None
+
     # --- Agent Reasoning ---
     # Enable reasoning by working through the problem step by step.
     reasoning: bool = False
@@ -379,6 +395,8 @@ class Agent:
         tool_call_limit: Optional[int] = None,
         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
         tool_hooks: Optional[List[Callable]] = None,
+        pre_hooks: Optional[Union[List[Callable[..., Any]], List[BaseGuardrail]]] = None,
+        post_hooks: Optional[Union[List[Callable[..., Any]], List[BaseGuardrail]]] = None,
         reasoning: bool = False,
         reasoning_model: Optional[Model] = None,
         reasoning_agent: Optional[Agent] = None,
@@ -481,6 +499,10 @@ class Agent:
         self.tool_choice = tool_choice
         self.tool_hooks = tool_hooks

+        # Initialize hooks with backward compatibility
+        self.pre_hooks = pre_hooks
+        self.post_hooks = post_hooks
+
         self.reasoning = reasoning
         self.reasoning_model = reasoning_model
         self.reasoning_agent = reasoning_agent
@@ -553,6 +575,8 @@ class Agent:

         self._formatter: Optional[SafeFormatter] = None

+        self._hooks_normalised = False
+
     def set_id(self) -> None:
         if self.id is None:
             self.id = generate_id_from_name(self.name)
@@ -742,35 +766,97 @@ class Agent:
     def _run(
         self,
         run_response: RunOutput,
-        run_messages: RunMessages,
         session: AgentSession,
+        session_state: Optional[Dict[str, Any]] = None,
         user_id: Optional[str] = None,
+        knowledge_filters: Optional[Dict[str, Any]] = None,
+        add_history_to_context: Optional[bool] = None,
+        add_dependencies_to_context: Optional[bool] = None,
+        add_session_state_to_context: Optional[bool] = None,
+        metadata: Optional[Dict[str, Any]] = None,
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
+        dependencies: Optional[Dict[str, Any]] = None,
+        debug_mode: Optional[bool] = None,
+        **kwargs: Any,
     ) -> RunOutput:
         """
         Run the Agent and return the RunOutput.
         Steps:
-        1. Reason about the task if reasoning is enabled
-        2. Generate a response from the Model (includes running function calls)
-        3. Update the RunOutput with the model response
-        4. Update Agent Memory
-        5. Calculate session metrics
-        6. Add RunOutput to Agent Session
-        7. Save session to storage
+        1. Execute pre-hooks
+        2. Prepare run messages
+        3. Reason about the task if reasoning is enabled
+        4. Generate a response from the Model (includes running function calls)
+        5. Update the RunOutput with the model response
+        6. Execute post-hooks
+        7. Calculate session metrics
         8. Optional: Save output to file if save_response_to_file is set
+        9. Add RunOutput to Agent Session
+        10. Update Agent Memory
+        11. Save session to storage
         """
-        log_debug(f"Agent Run Start: {run_response.run_id}", center=True)

         # Register run for cancellation tracking
         register_run(run_response.run_id)  # type: ignore

-        # 1. Reason about the task
+        # 1. Execute pre-hooks
+        run_input = cast(RunInput, run_response.input)
+        self.model = cast(Model, self.model)
+        if self.pre_hooks is not None:
+            # Can modify the run input
+            pre_hook_iterator = self._execute_pre_hooks(
+                hooks=self.pre_hooks,  # type: ignore
+                run_response=run_response,
+                run_input=run_input,
+                session=session,
+                user_id=user_id,
+                debug_mode=debug_mode,
+                **kwargs,
+            )
+            # Consume the generator without yielding
+            deque(pre_hook_iterator, maxlen=0)
+
+        self._determine_tools_for_model(
+            model=self.model,
+            run_response=run_response,
+            session=session,
+            session_state=session_state,
+            dependencies=dependencies,
+            user_id=user_id,
+            async_mode=False,
+            knowledge_filters=knowledge_filters,
+        )
+
+        # 2. Prepare run messages
+        run_messages: RunMessages = self._get_run_messages(
+            run_response=run_response,
+            input=run_input.input_content,
+            session=session,
+            session_state=session_state,
+            user_id=user_id,
+            audio=run_input.audios,
+            images=run_input.images,
+            videos=run_input.videos,
+            files=run_input.files,
+            knowledge_filters=knowledge_filters,
+            add_history_to_context=add_history_to_context,
+            dependencies=dependencies,
+            add_dependencies_to_context=add_dependencies_to_context,
+            add_session_state_to_context=add_session_state_to_context,
+            metadata=metadata,
+            **kwargs,
+        )
+        if len(run_messages.messages) == 0:
+            log_error("No messages to be sent to the model.")
+
+        log_debug(f"Agent Run Start: {run_response.run_id}", center=True)
+
+        # 3. Reason about the task
         self._handle_reasoning(run_response=run_response, run_messages=run_messages)

         # Check for cancellation before model call
         raise_if_cancelled(run_response.run_id)  # type: ignore

-        # 2. Generate a response from the Model (includes running function calls)
+        # 4. Generate a response from the Model (includes running function calls)
         self.model = cast(Model, self.model)
         model_response: ModelResponse = self.model.response(
             messages=run_messages.messages,
@@ -792,7 +878,7 @@ class Agent:
         # If a parser model is provided, structure the response separately
         self._parse_response_with_parser_model(model_response, run_messages)

-        # 3. Update the RunOutput with the model response
+        # 5. Update the RunOutput with the model response
         self._update_run_response(model_response=model_response, run_response=run_response, run_messages=run_messages)

         if self.store_media:
@@ -806,9 +892,6 @@ class Agent:
             run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
         )

-        # 4. Calculate session metrics
-        self._update_session_metrics(session=session, run_response=run_response)
-
         run_response.status = RunStatus.completed

         # Convert the response to the structured format if needed
@@ -818,22 +901,36 @@ class Agent:
         if run_response.metrics:
             run_response.metrics.stop_timer()

-        # 5. Optional: Save output to file if save_response_to_file is set
+        # 6. Execute post-hooks after output is generated but before response is returned
+        if self.post_hooks is not None:
+            self._execute_post_hooks(
+                hooks=self.post_hooks,  # type: ignore
+                run_output=run_response,
+                session=session,
+                user_id=user_id,
+                debug_mode=debug_mode,
+                **kwargs,
+            )
+
+        # 7. Calculate session metrics
+        self._update_session_metrics(session=session, run_response=run_response)
+
+        # 8. Optional: Save output to file if save_response_to_file is set
         self.save_run_response_to_file(
             run_response=run_response, input=run_messages.user_message, session_id=session.session_id, user_id=user_id
         )

-        # 6. Add the RunOutput to Agent Session
+        # 9. Add the RunOutput to Agent Session
         session.upsert_run(run=run_response)

-        # 7. Update Agent Memory
+        # 10. Update Agent Memory
         response_iterator = self._make_memories_and_summaries(
             run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
         )
         # Consume the response iterator to ensure the memory is updated before the run is completed
         deque(response_iterator, maxlen=0)

-        # 8. Save session to memory
+        # 11. Save session to memory
         self.save_session(session=session)

         # Log Agent Telemetry
@@ -849,42 +946,103 @@ class Agent:
     def _run_stream(
         self,
         run_response: RunOutput,
-        run_messages: RunMessages,
         session: AgentSession,
+        session_state: Optional[Dict[str, Any]] = None,
         user_id: Optional[str] = None,
+        knowledge_filters: Optional[Dict[str, Any]] = None,
+        add_history_to_context: Optional[bool] = None,
+        add_dependencies_to_context: Optional[bool] = None,
+        add_session_state_to_context: Optional[bool] = None,
+        metadata: Optional[Dict[str, Any]] = None,
+        dependencies: Optional[Dict[str, Any]] = None,
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
         stream_intermediate_steps: bool = False,
         workflow_context: Optional[Dict] = None,
         yield_run_response: bool = False,
+        debug_mode: Optional[bool] = None,
+        **kwargs: Any,
     ) -> Iterator[Union[RunOutputEvent, RunOutput]]:
         """Run the Agent and yield the RunOutput.

         Steps:
-        1. Reason about the task if reasoning is enabled
-        2. Generate a response from the Model (includes running function calls)
-        3. Update Agent Memory
-        4. Calculate session metrics
-        5. Save session to storage
+        1. Execute pre-hooks
+        2. Prepare run messages
+        3. Reason about the task if reasoning is enabled
+        4. Generate a response from the Model (includes running function calls)
+        5. Calculate session metrics
         6. Optional: Save output to file if save_response_to_file is set
         7. Add the RunOutput to the Agent Session
+        8. Update Agent Memory
+        9. Save session to storage
         """
-        log_debug(f"Agent Run Start: {run_response.run_id}", center=True)

         # Register run for cancellation tracking
         register_run(run_response.run_id)  # type: ignore

+        # 1. Execute pre-hooks
+        run_input = cast(RunInput, run_response.input)
+        self.model = cast(Model, self.model)
+        if self.pre_hooks is not None:
+            # Can modify the run input
+            pre_hook_iterator = self._execute_pre_hooks(
+                hooks=self.pre_hooks,  # type: ignore
+                run_response=run_response,
+                run_input=run_input,
+                session=session,
+                user_id=user_id,
+                debug_mode=debug_mode,
+                **kwargs,
+            )
+            for event in pre_hook_iterator:
+                yield event
+
+        self._determine_tools_for_model(
+            model=self.model,
+            run_response=run_response,
+            session=session,
+            session_state=session_state,
+            dependencies=dependencies,
+            user_id=user_id,
+            async_mode=False,
+            knowledge_filters=knowledge_filters,
+        )
+
+        # 2. Prepare run messages
+        run_messages: RunMessages = self._get_run_messages(
+            run_response=run_response,
+            input=run_input.input_content,
+            session=session,
+            session_state=session_state,
+            user_id=user_id,
+            audio=run_input.audios,
+            images=run_input.images,
+            videos=run_input.videos,
+            files=run_input.files,
+            knowledge_filters=knowledge_filters,
+            add_history_to_context=add_history_to_context,
+            dependencies=dependencies,
+            add_dependencies_to_context=add_dependencies_to_context,
+            add_session_state_to_context=add_session_state_to_context,
+            metadata=metadata,
+            **kwargs,
+        )
+        if len(run_messages.messages) == 0:
+            log_error("No messages to be sent to the model.")
+
+        log_debug(f"Agent Run Start: {run_response.run_id}", center=True)
+
         try:
             # Start the Run by yielding a RunStarted event
             if stream_intermediate_steps:
                 yield self._handle_event(create_run_started_event(run_response), run_response, workflow_context)

-            # 1. Reason about the task if reasoning is enabled
+            # 3. Reason about the task if reasoning is enabled
             yield from self._handle_reasoning_stream(run_response=run_response, run_messages=run_messages)

             # Check for cancellation before model processing
             raise_if_cancelled(run_response.run_id)  # type: ignore

-            # 2. Process model response
+            # 4. Process model response
             if self.output_model is None:
                 for event in self._handle_model_response_stream(
                     session=session,
@@ -946,20 +1104,21 @@ class Agent:
                 )
                 return

-            # 3. Calculate session metrics
-            self._update_session_metrics(session=session, run_response=run_response)
-
            run_response.status = RunStatus.completed

-            completed_event = self._handle_event(
-                create_run_completed_event(from_run_response=run_response), run_response, workflow_context
-            )
-
             # Set the run duration
             if run_response.metrics:
                 run_response.metrics.stop_timer()

-            # 4. Optional: Save output to file if save_response_to_file is set
+            # TODO: For now we don't run post-hooks during streaming
+
+            # 5. Calculate session metrics
+            self._update_session_metrics(session=session, run_response=run_response)
+
+            completed_event = self._handle_event(
+                create_run_completed_event(from_run_response=run_response), run_response, workflow_context
+            )
+            # 6. Optional: Save output to file if save_response_to_file is set
             self.save_run_response_to_file(
                 run_response=run_response,
                 input=run_messages.user_message,
@@ -967,15 +1126,15 @@ class Agent:
                 user_id=user_id,
             )

-            # 5. Add RunOutput to Agent Session
+            # 7. Add RunOutput to Agent Session
             session.upsert_run(run=run_response)

-            # 6. Update Agent Memory
+            # 8. Update Agent Memory
             yield from self._make_memories_and_summaries(
                 run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
             )

-            # 7. Save session to storage
+            # 9. Save session to storage
             self.save_session(session=session)

             if stream_intermediate_steps:
@@ -1092,6 +1251,14 @@ class Agent:
         # Validate input against input_schema if provided
         validated_input = self._validate_input(input)

+        # Normalise hook & guardails
+        if not self._hooks_normalised:
+            if self.pre_hooks:
+                self.pre_hooks = normalize_hooks(self.pre_hooks)
+            if self.post_hooks:
+                self.post_hooks = normalize_hooks(self.post_hooks)
+            self._hooks_normalised = True
+
         session_id, user_id, session_state = self._initialize_session(
             run_id=run_id, session_id=session_id, user_id=user_id, session_state=session_state
         )
@@ -1105,7 +1272,7 @@ class Agent:

         # Create RunInput to capture the original user input
         run_input = RunInput(
-            input_content=input,
+            input_content=validated_input,
             images=image_artifacts,
             videos=video_artifacts,
             audios=audio_artifacts,
@@ -1178,6 +1345,7 @@ class Agent:
             run_id=run_id,
             session_id=session_id,
             agent_id=self.id,
+            user_id=user_id,
             agent_name=self.name,
             metadata=metadata,
             input=run_input,
@@ -1190,17 +1358,6 @@ class Agent:
         run_response.metrics = Metrics()
         run_response.metrics.start_timer()

-        self._determine_tools_for_model(
-            model=self.model,
-            run_response=run_response,
-            session=agent_session,
-            session_state=session_state,
-            dependencies=run_dependencies,
-            user_id=user_id,
-            async_mode=False,
-            knowledge_filters=effective_filters,
-        )
-
         # If no retries are set, use the agent's default retries
         retries = retries if retries is not None else self.retries

@@ -1209,48 +1366,46 @@ class Agent:

         for attempt in range(num_attempts):
             try:
-                # Prepare run messages
-                run_messages: RunMessages = self._get_run_messages(
-                    run_response=run_response,
-                    input=validated_input,
-                    session=agent_session,
-                    session_state=session_state,
-                    user_id=user_id,
-                    audio=audio,
-                    images=images,
-                    videos=videos,
-                    files=files,
-                    knowledge_filters=effective_filters,
-                    add_history_to_context=add_history,
-                    dependencies=run_dependencies,
-                    add_dependencies_to_context=add_dependencies,
-                    add_session_state_to_context=add_session_state,
-                    **kwargs,
-                )
-                if len(run_messages.messages) == 0:
-                    log_error("No messages to be sent to the model.")
-
                 if stream:
                     response_iterator = self._run_stream(
                         run_response=run_response,
-                        run_messages=run_messages,
-                        user_id=user_id,
                         session=agent_session,
+                        session_state=session_state,
+                        user_id=user_id,
+                        knowledge_filters=effective_filters,
+                        add_history_to_context=add_history,
+                        add_dependencies_to_context=add_dependencies,
+                        add_session_state_to_context=add_session_state,
+                        metadata=metadata,
+                        dependencies=run_dependencies,
                         response_format=response_format,
                         stream_intermediate_steps=stream_intermediate_steps,
                         workflow_context=workflow_context,
                         yield_run_response=yield_run_response,
+                        debug_mode=debug_mode,
+                        **kwargs,
                     )
                     return response_iterator
                 else:
                     response = self._run(
                         run_response=run_response,
-                        run_messages=run_messages,
-                        user_id=user_id,
                         session=agent_session,
+                        session_state=session_state,
+                        user_id=user_id,
+                        knowledge_filters=effective_filters,
+                        add_history_to_context=add_history,
+                        add_dependencies_to_context=add_dependencies,
+                        add_session_state_to_context=add_session_state,
+                        metadata=metadata,
+                        dependencies=run_dependencies,
                         response_format=response_format,
+                        debug_mode=debug_mode,
+                        **kwargs,
                     )
                     return response
+            except (InputCheckError, OutputCheckError) as e:
+                log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
+                raise e
             except ModelProviderError as e:
                 log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}")
                 if isinstance(e, StopAgentRun):
@@ -1303,14 +1458,9 @@ class Agent:
     async def _arun(
         self,
         run_response: RunOutput,
-        input: Union[str, List, Dict, Message, BaseModel, List[Message]],
         session: AgentSession,
         session_state: Optional[Dict[str, Any]] = None,
         user_id: Optional[str] = None,
-        images: Optional[Sequence[Image]] = None,
-        videos: Optional[Sequence[Video]] = None,
-        audio: Optional[Sequence[Audio]] = None,
-        files: Optional[Sequence[File]] = None,
         knowledge_filters: Optional[Dict[str, Any]] = None,
         add_history_to_context: Optional[bool] = None,
         add_dependencies_to_context: Optional[bool] = None,
@@ -1318,36 +1468,72 @@ class Agent:
         metadata: Optional[Dict[str, Any]] = None,
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
         dependencies: Optional[Dict[str, Any]] = None,
+        debug_mode: Optional[bool] = None,
         **kwargs: Any,
     ) -> RunOutput:
         """Run the Agent and yield the RunOutput.

         Steps:
         1. Resolve dependencies
-        2. Prepare run messages
-        3. Reason about the task if reasoning is enabled
-        4. Generate a response from the Model (includes running function calls)
-        5. Update the RunOutput with the model response
-        6. Update Agent Memory
-        7. Calculate session metrics
-        8. Add RunOutput to Agent Session
-        9. Save session to storage
+        2. Execute pre-hooks
+        3. Prepare run messages
+        4. Reason about the task if reasoning is enabled
+        5. Generate a response from the Model (includes running function calls)
+        6. Update the RunOutput with the model response
+        7. Execute post-hooks
+        8. Calculate session metrics
+        9. Save output to file
+        10. Add RunOutput to Agent Session
+        11. Update Agent Memory
+        12. Save session to storage
         """
+        # Register run for cancellation tracking
+        register_run(run_response.run_id)  # type: ignore
+
         # 1. Resolving here for async requirement
         if dependencies is not None:
             await self._aresolve_run_dependencies(dependencies)

-        # 2. Prepare run messages
+        # 2. Execute pre-hooks
+        run_input = cast(RunInput, run_response.input)
+        self.model = cast(Model, self.model)
+        if self.pre_hooks is not None:
+            # Can modify the run input
+            pre_hook_iterator = self._aexecute_pre_hooks(
+                hooks=self.pre_hooks,  # type: ignore
+                run_response=run_response,
+                run_input=run_input,
+                session=session,
+                user_id=user_id,
+                debug_mode=debug_mode,
+                **kwargs,
+            )
+            # Consume the async iterator without yielding
+            async for _ in pre_hook_iterator:
+                pass
+
+        self._determine_tools_for_model(
+            model=self.model,
+            run_response=run_response,
+            session=session,
+            session_state=session_state,
+            dependencies=dependencies,
+            user_id=user_id,
+            async_mode=True,
+            knowledge_filters=knowledge_filters,
+        )
+
+        # 3. Prepare run messages
         run_messages: RunMessages = self._get_run_messages(
             run_response=run_response,
-            input=input,
+            input=run_input.input_content,
             session=session,
             session_state=session_state,
             user_id=user_id,
-            audio=audio,
-            images=images,
-            videos=videos,
-            files=files,
+            audio=run_input.audios,
+            images=run_input.images,
+            videos=run_input.videos,
+            files=run_input.files,
             knowledge_filters=knowledge_filters,
             add_history_to_context=add_history_to_context,
             dependencies=dependencies,
@@ -1361,17 +1547,13 @@ class Agent:

         log_debug(f"Agent Run Start: {run_response.run_id}", center=True)

-        # Register run for cancellation tracking
-        register_run(run_response.run_id)  # type: ignore
-
-        self.model = cast(Model, self.model)
-        # 3. Reason about the task if reasoning is enabled
+        # 4. Reason about the task if reasoning is enabled
         await self._ahandle_reasoning(run_response=run_response, run_messages=run_messages)

         # Check for cancellation before model call
         raise_if_cancelled(run_response.run_id)  # type: ignore

-        # 4. Generate a response from the Model (includes running function calls)
+        # 5. Generate a response from the Model (includes running function calls)
         model_response: ModelResponse = await self.model.aresponse(
             messages=run_messages.messages,
             tools=self._tools_for_model,
@@ -1391,7 +1573,7 @@ class Agent:
         # If a parser model is provided, structure the response separately
         await self._aparse_response_with_parser_model(model_response=model_response, run_messages=run_messages)

-        # 5. Update the RunOutput with the model response
+        # 6. Update the RunOutput with the model response
         self._update_run_response(model_response=model_response, run_response=run_response, run_messages=run_messages)

         if self.store_media:
@@ -1405,9 +1587,6 @@ class Agent:
             run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
         )

-        # 6. Calculate session metrics
-        self._update_session_metrics(session=session, run_response=run_response)
-
         run_response.status = RunStatus.completed

         # Convert the response to the structured format if needed
@@ -1417,21 +1596,35 @@ class Agent:
         if run_response.metrics:
             run_response.metrics.stop_timer()

-        # Optional: Save output to file if save_response_to_file is set
+        # 7. Execute post-hooks after output is generated but before response is returned
+        if self.post_hooks is not None:
+            await self._aexecute_post_hooks(
+                hooks=self.post_hooks,  # type: ignore
+                run_output=run_response,
+                session=session,
+                user_id=user_id,
+                debug_mode=debug_mode,
+                **kwargs,
+            )
+
+        # 8. Calculate session metrics
+        self._update_session_metrics(session=session, run_response=run_response)
+
+        # 9. Optional: Save output to file if save_response_to_file is set
         self.save_run_response_to_file(
             run_response=run_response, input=run_messages.user_message, session_id=session.session_id, user_id=user_id
         )

-        # 7. Add RunOutput to Agent Session
+        # 10. Add RunOutput to Agent Session
         session.upsert_run(run=run_response)

-        # 8. Update Agent Memory
+        # 11. Update Agent Memory
         async for _ in self._amake_memories_and_summaries(
             run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
         ):
             pass

-        # 9. Save session to storage
+        # 12. Save session to storage
         self.save_session(session=session)

         # Log Agent Telemetry
@@ -1448,52 +1641,79 @@ class Agent:
         self,
         run_response: RunOutput,
         session: AgentSession,
-        input: Union[str, List, Dict, Message, BaseModel, List[Message]],
         session_state: Optional[Dict[str, Any]] = None,
-        audio: Optional[Sequence[Audio]] = None,
-        images: Optional[Sequence[Image]] = None,
-        videos: Optional[Sequence[Video]] = None,
-        files: Optional[Sequence[File]] = None,
+        user_id: Optional[str] = None,
         knowledge_filters: Optional[Dict[str, Any]] = None,
         add_history_to_context: Optional[bool] = None,
         add_dependencies_to_context: Optional[bool] = None,
         add_session_state_to_context: Optional[bool] = None,
         metadata: Optional[Dict[str, Any]] = None,
         dependencies: Optional[Dict[str, Any]] = None,
-        user_id: Optional[str] = None,
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
         stream_intermediate_steps: bool = False,
         workflow_context: Optional[Dict] = None,
         yield_run_response: Optional[bool] = None,
+        debug_mode: Optional[bool] = None,
        **kwargs: Any,
     ) -> AsyncIterator[Union[RunOutputEvent, RunOutput]]:
         """Run the Agent and yield the RunOutput.

         Steps:
         1. Resolve dependencies
-        2. Prepare run messages
-        3. Reason about the task if reasoning is enabled
-        4. Generate a response from the Model (includes running function calls)
-        5. Update Agent Memory
+        2. Execute pre-hooks
+        3. Prepare run messages
+        4. Reason about the task if reasoning is enabled
+        5. Generate a response from the Model (includes running function calls)
         6. Calculate session metrics
         7. Add RunOutput to Agent Session
-        8. Save session to storage
+        8. Update Agent Memory
+        9. Save session to storage
         """
+
         # 1. Resolving here for async requirement
         if dependencies is not None:
             await self._aresolve_run_dependencies(dependencies=dependencies)

-        # 2. Prepare run messages
+        # 2. Execute pre-hooks
+        run_input = cast(RunInput, run_response.input)
+        self.model = cast(Model, self.model)
+
+        if self.pre_hooks is not None:
+            # Can modify the run input
+            pre_hook_iterator = self._aexecute_pre_hooks(
+                hooks=self.pre_hooks,  # type: ignore
+                run_response=run_response,
+                run_input=run_input,
+                session=session,
+                user_id=user_id,
+                debug_mode=debug_mode,
+                **kwargs,
+            )
+            async for event in pre_hook_iterator:
+                yield event
+
+        self._determine_tools_for_model(
+            model=self.model,
+            run_response=run_response,
+            session=session,
+            session_state=session_state,
+            dependencies=dependencies,
+            user_id=user_id,
+            async_mode=True,
+            knowledge_filters=knowledge_filters,
+        )
+
+        # 3. Prepare run messages
         run_messages: RunMessages = self._get_run_messages(
             run_response=run_response,
-            input=input,
+            input=run_input.input_content,
             session=session,
             session_state=session_state,
             user_id=user_id,
-            audio=audio,
-            images=images,
-            videos=videos,
-            files=files,
+            audio=run_input.audios,
+            images=run_input.images,
+            videos=run_input.videos,
+            files=run_input.files,
             knowledge_filters=knowledge_filters,
             add_history_to_context=add_history_to_context,
             dependencies=dependencies,
@@ -1513,7 +1733,7 @@ class Agent:
         if stream_intermediate_steps:
             yield self._handle_event(create_run_started_event(run_response), run_response, workflow_context)

-        # 3. Reason about the task if reasoning is enabled
+        # 4. Reason about the task if reasoning is enabled
         async for item in self._ahandle_reasoning_stream(run_response=run_response, run_messages=run_messages):
             raise_if_cancelled(run_response.run_id)  # type: ignore
             yield item
@@ -1521,7 +1741,7 @@ class Agent:
         # Check for cancellation before model processing
         raise_if_cancelled(run_response.run_id)  # type: ignore

-        # 4. Generate a response from the Model
+        # 5. Generate a response from the Model
         if self.output_model is None:
             async for event in self._ahandle_model_response_stream(
                 session=session,
@@ -1585,18 +1805,18 @@ class Agent:
                 yield item
             return

-        # 5. Calculate session metrics
-        self._update_session_metrics(session=session, run_response=run_response)
-
         run_response.status = RunStatus.completed

+        # Set the run duration
+        if run_response.metrics:
+            run_response.metrics.stop_timer()
+
         completed_event = self._handle_event(
             create_run_completed_event(from_run_response=run_response), run_response, workflow_context
         )

-        # Set the run duration
-        if run_response.metrics:
-            run_response.metrics.stop_timer()
+        # 6. Calculate session metrics
+        self._update_session_metrics(session=session, run_response=run_response)

         # Optional: Save output to file if save_response_to_file is set
         self.save_run_response_to_file(
@@ -1606,16 +1826,16 @@ class Agent:
             user_id=user_id,
         )

-        # 6. Add RunOutput to Agent Session
+        # 7. Add RunOutput to Agent Session
         session.upsert_run(run=run_response)

-        # 7. Update Agent Memory
+        # 8. Update Agent Memory
         async for event in self._amake_memories_and_summaries(
             run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
         ):
             yield event

-        # 8. Save session to storage
+        # 9. Save session to storage
         self.save_session(session=session)

         if stream_intermediate_steps:
@@ -1731,6 +1951,14 @@ class Agent:
         # Validate input against input_schema if provided
         validated_input = self._validate_input(input)

+        # Normalise hook & guardails
+        if not self._hooks_normalised:
+            if self.pre_hooks:
+                self.pre_hooks = normalize_hooks(self.pre_hooks, async_mode=True)
+            if self.post_hooks:
+                self.post_hooks = normalize_hooks(self.post_hooks, async_mode=True)
+            self._hooks_normalised = True
+
         session_id, user_id, session_state = self._initialize_session(
             run_id=run_id, session_id=session_id, user_id=user_id, session_state=session_state
         )
@@ -1744,7 +1972,7 @@ class Agent:

         # Create RunInput to capture the original user input
         run_input = RunInput(
-            input_content=input,
+            input_content=validated_input,
             images=image_artifacts,
             videos=video_artifacts,
             audios=audio_artifacts,
@@ -1806,11 +2034,15 @@ class Agent:
         else:
             merge_dictionaries(metadata, self.metadata)

+        # If no retries are set, use the agent's default retries
+        retries = retries if retries is not None else self.retries
+
         # Create a new run_response for this attempt
         run_response = RunOutput(
             run_id=run_id,
             session_id=session_id,
             agent_id=self.id,
+            user_id=user_id,
             agent_name=self.name,
             metadata=metadata,
             input=run_input,
@@ -1823,20 +2055,6 @@ class Agent:
         run_response.metrics = Metrics()
         run_response.metrics.start_timer()

-        self._determine_tools_for_model(
-            model=self.model,
-            run_response=run_response,
-            session=agent_session,
-            session_state=session_state,
-            dependencies=run_dependencies,
-            user_id=user_id,
-            async_mode=True,
-            knowledge_filters=effective_filters,
-        )
-
-        # If no retries are set, use the agent's default retries
-        retries = retries if retries is not None else self.retries
-
         last_exception = None
         num_attempts = retries + 1

@@ -1846,15 +2064,10 @@ class Agent:
                 if stream:
                     return self._arun_stream(  # type: ignore
                         run_response=run_response,
-                        input=validated_input,
-                        user_id=user_id,
                         session=agent_session,
+                        user_id=user_id,
                         session_state=session_state,
-                        audio=audio,
-                        images=images,
-                        videos=videos,
-                        files=files,
-                        knowledge_filters=knowledge_filters,
+                        knowledge_filters=effective_filters,
                         add_history_to_context=add_history,
                         add_dependencies_to_context=add_dependencies,
                         add_session_state_to_context=add_session_state,
@@ -1864,19 +2077,15 @@ class Agent:
                         workflow_context=workflow_context,
                         yield_run_response=yield_run_response,
                         dependencies=run_dependencies,
+                        debug_mode=debug_mode,
                         **kwargs,
                     )  # type: ignore[assignment]
                 else:
                     return self._arun(  # type: ignore
                         run_response=run_response,
-                        input=validated_input,
                         user_id=user_id,
                         session=agent_session,
                         session_state=session_state,
-                        audio=audio,
-                        images=images,
-                        videos=videos,
-                        files=files,
                         knowledge_filters=knowledge_filters,
                         add_history_to_context=add_history,
                         add_dependencies_to_context=add_dependencies,
@@ -1887,8 +2096,13 @@ class Agent:
                         workflow_context=workflow_context,
                         yield_run_response=yield_run_response,
                         dependencies=run_dependencies,
+                        debug_mode=debug_mode,
                         **kwargs,
                     )
+
+            except (InputCheckError, OutputCheckError) as e:
+                log_error(f"Validation failed: {str(e)} | Check trigger: {e.check_trigger}")
+                raise e
             except ModelProviderError as e:
                 log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}")
                 if isinstance(e, StopAgentRun):
@@ -1986,6 +2200,7 @@ class Agent:
         knowledge_filters: Optional[Dict[str, Any]] = None,
         dependencies: Optional[Dict[str, Any]] = None,
         debug_mode: Optional[bool] = None,
+        **kwargs,
     ) -> Union[RunOutput, Iterator[RunOutputEvent]]:
         """Continue a previous run.

@@ -2084,6 +2299,7 @@ class Agent:
             run_response=run_response,
             session=agent_session,
             session_state=session_state,
+            dependencies=run_dependencies,
             user_id=user_id,
             async_mode=False,
             knowledge_filters=effective_filters,
@@ -2122,6 +2338,8 @@ class Agent:
                     user_id=user_id,
                     session=agent_session,
                     response_format=response_format,
+                    debug_mode=debug_mode,
+                    **kwargs,
                 )
                 return response
             except ModelProviderError as e:
@@ -2168,6 +2386,8 @@ class Agent:
         session: AgentSession,
         user_id: Optional[str] = None,
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
+        debug_mode: Optional[bool] = None,
+        **kwargs,
     ) -> RunOutput:
         """Continue a previous run.

@@ -2216,6 +2436,16 @@ class Agent:
         if run_response.metrics:
             run_response.metrics.stop_timer()

+        if self.post_hooks is not None:
+            self._execute_post_hooks(
+                hooks=self.post_hooks,  # type: ignore
+                run_output=run_response,
+                session=session,
+                user_id=user_id,
+                debug_mode=debug_mode,
+                **kwargs,
+            )
+
         # 4. Save output to file if save_response_to_file is set
         self.save_run_response_to_file(
             run_response=run_response, input=run_messages.user_message, session_id=session.session_id, user_id=user_id
@@ -2371,6 +2601,7 @@ class Agent:
         knowledge_filters: Optional[Dict[str, Any]] = None,
         dependencies: Optional[Dict[str, Any]] = None,
         debug_mode: Optional[bool] = None,
+        **kwargs,
     ) -> Union[RunOutput, AsyncIterator[Union[RunOutputEvent, RunOutput]]]:
         """Continue a previous run.

@@ -2411,10 +2642,6 @@ class Agent:

         run_dependencies = dependencies if dependencies is not None else self.dependencies

-        # Resolve dependencies
-        if run_dependencies is not None:
-            self._resolve_run_dependencies(dependencies=run_dependencies)
-
         effective_filters = knowledge_filters

         # When filters are passed manually
@@ -2506,6 +2733,8 @@ class Agent:
                     session=agent_session,
                     response_format=response_format,
                     dependencies=run_dependencies,
+                    debug_mode=debug_mode,
+                    **kwargs,
                 )
             except ModelProviderError as e:
                 log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}")
@@ -2551,6 +2780,8 @@ class Agent:
         user_id: Optional[str] = None,
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
         dependencies: Optional[Dict[str, Any]] = None,
+        debug_mode: Optional[bool] = None,
+        **kwargs,
     ) -> RunOutput:
         """Continue a previous run.

@@ -2602,6 +2833,16 @@ class Agent:
         if run_response.metrics:
             run_response.metrics.stop_timer()

+        if self.post_hooks is not None:
+            await self._aexecute_post_hooks(
+                hooks=self.post_hooks,  # type: ignore
+                run_output=run_response,
+                session=session,
+                user_id=user_id,
+                debug_mode=debug_mode,
+                **kwargs,
+            )
+
         # 4. Save output to file if save_response_to_file is set
         self.save_run_response_to_file(
             run_response=run_response, input=run_messages.user_message, session_id=session.session_id, user_id=user_id
@@ -2647,12 +2888,9 @@ class Agent:
         6. Save output to file if save_response_to_file is set
         7. Save session to storage
         """
-        # Resolving here for async requirement
-        run_dependencies = dependencies if dependencies is not None else self.dependencies
-
         # Resolve dependencies
-        if run_dependencies is not None:
-            await self._aresolve_run_dependencies(dependencies=run_dependencies)
+        if dependencies is not None:
+            await self._aresolve_run_dependencies(dependencies=dependencies)

         # Start the Run by yielding a RunContinued event
         if stream_intermediate_steps:
@@ -2716,6 +2954,202 @@ class Agent:

         log_debug(f"Agent Run End: {run_response.run_id}", center=True, symbol="*")

+    def _execute_pre_hooks(
+        self,
+        hooks: Optional[List[Callable[..., Any]]],
+        run_response: RunOutput,
+        run_input: RunInput,
+        session: AgentSession,
+        user_id: Optional[str] = None,
+        debug_mode: Optional[bool] = None,
+        **kwargs: Any,
+    ) -> Iterator[RunOutputEvent]:
+        """Execute multiple pre-hook functions in succession."""
+        if hooks is None:
+            return
+
+        # Prepare all possible arguments once
+        all_args = {
+            "run_input": run_input,
+            "agent": self,
+            "session": session,
+            "user_id": user_id,
+            "debug_mode": debug_mode or self.debug_mode,
+        }
+        all_args.update(kwargs)
+
+        for i, hook in enumerate(hooks):
+            yield self._handle_event(
+                run_response=run_response,
+                event=create_pre_hook_started_event(
+                    from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
+                ),
+            )
+            try:
+                # Filter arguments to only include those that the hook accepts
+                filtered_args = filter_hook_args(hook, all_args)
+
+                hook(**filtered_args)
+
+                yield self._handle_event(
+                    run_response=run_response,
+                    event=create_pre_hook_completed_event(
+                        from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
+                    ),
+                )
+
+            except (InputCheckError, OutputCheckError) as e:
+                raise e
+            except Exception as e:
+                log_error(f"Pre-hook #{i + 1} execution failed: {str(e)}")
+                log_exception(e)
+            finally:
+                # Reset global log mode incase an agent in the pre-hook changed it
+                self._set_debug(debug_mode=debug_mode)
+
+        # Update the input on the run_response
+        run_response.input = run_input
+
+    async def _aexecute_pre_hooks(
+        self,
+        hooks: Optional[List[Callable[..., Any]]],
+        run_response: RunOutput,
+        run_input: RunInput,
+        session: AgentSession,
+        user_id: Optional[str] = None,
+        debug_mode: Optional[bool] = None,
+        **kwargs: Any,
+    ) -> AsyncIterator[RunOutputEvent]:
+        """Execute multiple pre-hook functions in succession (async version)."""
+        if hooks is None:
+            return
+
+        # Prepare all possible arguments once
+        all_args = {
+            "run_input": run_input,
+            "agent": self,
+            "session": session,
+            "user_id": user_id,
+            "debug_mode": debug_mode or self.debug_mode,
+        }
+        all_args.update(kwargs)
+
+        for i, hook in enumerate(hooks):
+            yield self._handle_event(
+                run_response=run_response,
+                event=create_pre_hook_started_event(
+                    from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
+                ),
+            )
+            try:
+                # Filter arguments to only include those that the hook accepts
+                filtered_args = filter_hook_args(hook, all_args)
+
+                if asyncio.iscoroutinefunction(hook):
+                    await hook(**filtered_args)
+                else:
+                    # Synchronous function
+                    hook(**filtered_args)
+
+                yield self._handle_event(
+                    run_response=run_response,
+                    event=create_pre_hook_completed_event(
+                        from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
+                    ),
+                )
+
+            except (InputCheckError, OutputCheckError) as e:
+                raise e
+            except Exception as e:
+                log_error(f"Pre-hook #{i + 1} execution failed: {str(e)}")
+                log_exception(e)
+            finally:
+                # Reset global log mode incase an agent in the pre-hook changed it
+                self._set_debug(debug_mode=debug_mode)
+
+        # Update the input on the run_response
+        run_response.input = run_input
+
+    def _execute_post_hooks(
+        self,
+        hooks: Optional[List[Callable[..., Any]]],
+        run_output: RunOutput,
+        session: AgentSession,
+        user_id: Optional[str] = None,
+        debug_mode: Optional[bool] = None,
+        **kwargs: Any,
+    ) -> None:
+        """Execute multiple post-hook functions in succession."""
+        if hooks is None:
+            return
+
+        # Prepare all possible arguments once
+        all_args = {
+            "run_output": run_output,
+            "agent": self,
+            "session": session,
+            "user_id": user_id,
+            "debug_mode": debug_mode or self.debug_mode,
+        }
+        all_args.update(kwargs)
+
+        for i, hook in enumerate(hooks):
+            try:
+                # Filter arguments to only include those that the hook accepts
+                filtered_args = filter_hook_args(hook, all_args)
+
+                hook(**filtered_args)
+            except (InputCheckError, OutputCheckError) as e:
+                raise e
+            except Exception as e:
+                log_error(f"Post-hook #{i + 1} execution failed: {str(e)}")
+                log_exception(e)
+            finally:
+                # Reset global log mode incase an agent in the pre-hook changed it
+                self._set_debug(debug_mode=debug_mode)
+
+    async def _aexecute_post_hooks(
+        self,
+        hooks: Optional[List[Callable[..., Any]]],
+        run_output: RunOutput,
+        session: AgentSession,
+        user_id: Optional[str] = None,
+        debug_mode: Optional[bool] = None,
+        **kwargs: Any,
+    ) -> None:
+        """Execute multiple post-hook functions in succession (async version)."""
+        if hooks is None:
+            return
+
+        # Prepare all possible arguments once
+        all_args = {
+            "run_output": run_output,
+            "agent": self,
+            "session": session,
+            "user_id": user_id,
+            "debug_mode": debug_mode or self.debug_mode,
+        }
+        all_args.update(kwargs)
+
+        for i, hook in enumerate(hooks):
+            try:
+                # Filter arguments to only include those that the hook accepts
+                filtered_args = filter_hook_args(hook, all_args)
+
+                if asyncio.iscoroutinefunction(hook):
+                    await hook(**filtered_args)
+                else:
+                    hook(**filtered_args)
+
+            except (InputCheckError, OutputCheckError) as e:
+                raise e
+            except Exception as e:
+                log_error(f"Post-hook #{i + 1} execution failed: {str(e)}")
+                log_exception(e)
+            finally:
+                # Reset global log mode incase an agent in the pre-hook changed it
+                self._set_debug(debug_mode=debug_mode)
+
     def _handle_agent_run_paused(
         self,
         run_response: RunOutput,
@@ -6946,7 +7380,6 @@ class Agent:

         if self.output_schema is not None:
             markdown = False
-            markdown = False

         if stream is None:
             stream = self.stream or False