agno 2.0.11__py3-none-any.whl → 2.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +607 -176
- agno/db/in_memory/in_memory_db.py +42 -29
- agno/db/mongo/mongo.py +65 -66
- agno/db/postgres/postgres.py +6 -4
- agno/db/utils.py +50 -22
- agno/exceptions.py +62 -1
- agno/guardrails/__init__.py +6 -0
- agno/guardrails/base.py +19 -0
- agno/guardrails/openai.py +144 -0
- agno/guardrails/pii.py +94 -0
- agno/guardrails/prompt_injection.py +51 -0
- agno/knowledge/embedder/aws_bedrock.py +9 -4
- agno/knowledge/embedder/azure_openai.py +54 -0
- agno/knowledge/embedder/base.py +2 -0
- agno/knowledge/embedder/cohere.py +184 -5
- agno/knowledge/embedder/google.py +79 -1
- agno/knowledge/embedder/huggingface.py +9 -4
- agno/knowledge/embedder/jina.py +63 -0
- agno/knowledge/embedder/mistral.py +78 -11
- agno/knowledge/embedder/ollama.py +5 -0
- agno/knowledge/embedder/openai.py +18 -54
- agno/knowledge/embedder/voyageai.py +69 -16
- agno/knowledge/knowledge.py +11 -4
- agno/knowledge/reader/pdf_reader.py +4 -3
- agno/knowledge/reader/website_reader.py +3 -2
- agno/models/base.py +125 -32
- agno/models/cerebras/cerebras.py +1 -0
- agno/models/cerebras/cerebras_openai.py +1 -0
- agno/models/dashscope/dashscope.py +1 -0
- agno/models/google/gemini.py +27 -5
- agno/models/openai/chat.py +13 -4
- agno/models/openai/responses.py +1 -1
- agno/models/perplexity/perplexity.py +2 -3
- agno/models/requesty/__init__.py +5 -0
- agno/models/requesty/requesty.py +49 -0
- agno/models/vllm/vllm.py +1 -0
- agno/models/xai/xai.py +1 -0
- agno/os/app.py +98 -126
- agno/os/interfaces/__init__.py +1 -0
- agno/os/interfaces/agui/agui.py +21 -5
- agno/os/interfaces/base.py +4 -2
- agno/os/interfaces/slack/slack.py +13 -8
- agno/os/interfaces/whatsapp/router.py +2 -0
- agno/os/interfaces/whatsapp/whatsapp.py +12 -5
- agno/os/mcp.py +2 -2
- agno/os/middleware/__init__.py +7 -0
- agno/os/middleware/jwt.py +233 -0
- agno/os/router.py +182 -46
- agno/os/routers/home.py +2 -2
- agno/os/routers/memory/memory.py +23 -1
- agno/os/routers/memory/schemas.py +1 -1
- agno/os/routers/session/session.py +20 -3
- agno/os/utils.py +74 -8
- agno/run/agent.py +120 -77
- agno/run/base.py +2 -13
- agno/run/team.py +115 -72
- agno/run/workflow.py +5 -15
- agno/session/summary.py +9 -10
- agno/session/team.py +2 -1
- agno/team/team.py +721 -169
- agno/tools/firecrawl.py +4 -4
- agno/tools/function.py +42 -2
- agno/tools/knowledge.py +3 -3
- agno/tools/searxng.py +2 -2
- agno/tools/serper.py +2 -2
- agno/tools/spider.py +2 -2
- agno/tools/workflow.py +4 -5
- agno/utils/events.py +66 -1
- agno/utils/hooks.py +57 -0
- agno/utils/media.py +11 -9
- agno/utils/print_response/agent.py +43 -5
- agno/utils/print_response/team.py +48 -12
- agno/utils/serialize.py +32 -0
- agno/vectordb/cassandra/cassandra.py +44 -4
- agno/vectordb/chroma/chromadb.py +79 -8
- agno/vectordb/clickhouse/clickhousedb.py +43 -6
- agno/vectordb/couchbase/couchbase.py +76 -5
- agno/vectordb/lancedb/lance_db.py +38 -3
- agno/vectordb/milvus/milvus.py +76 -4
- agno/vectordb/mongodb/mongodb.py +76 -4
- agno/vectordb/pgvector/pgvector.py +50 -6
- agno/vectordb/pineconedb/pineconedb.py +39 -2
- agno/vectordb/qdrant/qdrant.py +76 -26
- agno/vectordb/singlestore/singlestore.py +77 -4
- agno/vectordb/upstashdb/upstashdb.py +42 -2
- agno/vectordb/weaviate/weaviate.py +39 -3
- agno/workflow/types.py +5 -6
- agno/workflow/workflow.py +58 -2
- {agno-2.0.11.dist-info → agno-2.1.1.dist-info}/METADATA +4 -3
- {agno-2.0.11.dist-info → agno-2.1.1.dist-info}/RECORD +93 -82
- {agno-2.0.11.dist-info → agno-2.1.1.dist-info}/WHEEL +0 -0
- {agno-2.0.11.dist-info → agno-2.1.1.dist-info}/licenses/LICENSE +0 -0
- {agno-2.0.11.dist-info → agno-2.1.1.dist-info}/top_level.txt +0 -0
agno/agent/agent.py
CHANGED
@@ -28,7 +28,14 @@ from uuid import uuid4
 from pydantic import BaseModel

 from agno.db.base import BaseDb, SessionType, UserMemory
-from agno.exceptions import
+from agno.exceptions import (
+    InputCheckError,
+    ModelProviderError,
+    OutputCheckError,
+    RunCancelledException,
+    StopAgentRun,
+)
+from agno.guardrails import BaseGuardrail
 from agno.knowledge.knowledge import Knowledge
 from agno.knowledge.types import KnowledgeFilter
 from agno.media import Audio, File, Image, Video
@@ -64,6 +71,8 @@ from agno.utils.events import (
     create_memory_update_started_event,
     create_parser_model_response_completed_event,
     create_parser_model_response_started_event,
+    create_pre_hook_completed_event,
+    create_pre_hook_started_event,
     create_reasoning_completed_event,
     create_reasoning_started_event,
     create_reasoning_step_event,
@@ -77,6 +86,7 @@ from agno.utils.events import (
     create_tool_call_completed_event,
     create_tool_call_started_event,
 )
+from agno.utils.hooks import filter_hook_args, normalize_hooks
 from agno.utils.knowledge import get_agentic_or_user_search_filters
 from agno.utils.log import (
     log_debug,
@@ -209,6 +219,12 @@ class Agent:
     # A function that acts as middleware and is called around tool calls.
     tool_hooks: Optional[List[Callable]] = None

+    # --- Agent Hooks ---
+    # Functions called right after agent-session is loaded, before processing starts
+    pre_hooks: Optional[Union[List[Callable[..., Any]], List[BaseGuardrail]]] = None
+    # Functions called after output is generated but before the response is returned
+    post_hooks: Optional[Union[List[Callable[..., Any]], List[BaseGuardrail]]] = None
+
     # --- Agent Reasoning ---
     # Enable reasoning by working through the problem step by step.
     reasoning: bool = False
@@ -379,6 +395,8 @@ class Agent:
         tool_call_limit: Optional[int] = None,
         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
         tool_hooks: Optional[List[Callable]] = None,
+        pre_hooks: Optional[Union[List[Callable[..., Any]], List[BaseGuardrail]]] = None,
+        post_hooks: Optional[Union[List[Callable[..., Any]], List[BaseGuardrail]]] = None,
         reasoning: bool = False,
         reasoning_model: Optional[Model] = None,
         reasoning_agent: Optional[Agent] = None,
@@ -481,6 +499,10 @@ class Agent:
         self.tool_choice = tool_choice
         self.tool_hooks = tool_hooks

+        # Initialize hooks with backward compatibility
+        self.pre_hooks = pre_hooks
+        self.post_hooks = post_hooks
+
         self.reasoning = reasoning
         self.reasoning_model = reasoning_model
         self.reasoning_agent = reasoning_agent
@@ -553,6 +575,8 @@ class Agent:

         self._formatter: Optional[SafeFormatter] = None

+        self._hooks_normalised = False
+
     def set_id(self) -> None:
         if self.id is None:
             self.id = generate_id_from_name(self.name)
@@ -742,35 +766,97 @@ class Agent:
     def _run(
         self,
         run_response: RunOutput,
-        run_messages: RunMessages,
         session: AgentSession,
+        session_state: Optional[Dict[str, Any]] = None,
         user_id: Optional[str] = None,
+        knowledge_filters: Optional[Dict[str, Any]] = None,
+        add_history_to_context: Optional[bool] = None,
+        add_dependencies_to_context: Optional[bool] = None,
+        add_session_state_to_context: Optional[bool] = None,
+        metadata: Optional[Dict[str, Any]] = None,
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
+        dependencies: Optional[Dict[str, Any]] = None,
+        debug_mode: Optional[bool] = None,
+        **kwargs: Any,
     ) -> RunOutput:
         """
         Run the Agent and return the RunOutput.
         Steps:
-        1.
-        2.
-        3.
-        4.
-        5.
-        6.
-        7.
+        1. Execute pre-hooks
+        2. Prepare run messages
+        3. Reason about the task if reasoning is enabled
+        4. Generate a response from the Model (includes running function calls)
+        5. Update the RunOutput with the model response
+        6. Execute post-hooks
+        7. Calculate session metrics
         8. Optional: Save output to file if save_response_to_file is set
+        9. Add RunOutput to Agent Session
+        10. Update Agent Memory
+        11. Save session to storage
         """
-        log_debug(f"Agent Run Start: {run_response.run_id}", center=True)

         # Register run for cancellation tracking
         register_run(run_response.run_id)  # type: ignore

-        # 1.
+        # 1. Execute pre-hooks
+        run_input = cast(RunInput, run_response.input)
+        self.model = cast(Model, self.model)
+        if self.pre_hooks is not None:
+            # Can modify the run input
+            pre_hook_iterator = self._execute_pre_hooks(
+                hooks=self.pre_hooks,  # type: ignore
+                run_response=run_response,
+                run_input=run_input,
+                session=session,
+                user_id=user_id,
+                debug_mode=debug_mode,
+                **kwargs,
+            )
+            # Consume the generator without yielding
+            deque(pre_hook_iterator, maxlen=0)
+
+        self._determine_tools_for_model(
+            model=self.model,
+            run_response=run_response,
+            session=session,
+            session_state=session_state,
+            dependencies=dependencies,
+            user_id=user_id,
+            async_mode=False,
+            knowledge_filters=knowledge_filters,
+        )
+
+        # 2. Prepare run messages
+        run_messages: RunMessages = self._get_run_messages(
+            run_response=run_response,
+            input=run_input.input_content,
+            session=session,
+            session_state=session_state,
+            user_id=user_id,
+            audio=run_input.audios,
+            images=run_input.images,
+            videos=run_input.videos,
+            files=run_input.files,
+            knowledge_filters=knowledge_filters,
+            add_history_to_context=add_history_to_context,
+            dependencies=dependencies,
+            add_dependencies_to_context=add_dependencies_to_context,
+            add_session_state_to_context=add_session_state_to_context,
+            metadata=metadata,
+            **kwargs,
+        )
+        if len(run_messages.messages) == 0:
+            log_error("No messages to be sent to the model.")
+
+        log_debug(f"Agent Run Start: {run_response.run_id}", center=True)
+
+        # 3. Reason about the task
         self._handle_reasoning(run_response=run_response, run_messages=run_messages)

         # Check for cancellation before model call
         raise_if_cancelled(run_response.run_id)  # type: ignore

-        #
+        # 4. Generate a response from the Model (includes running function calls)
         self.model = cast(Model, self.model)
         model_response: ModelResponse = self.model.response(
             messages=run_messages.messages,
@@ -792,7 +878,7 @@ class Agent:
         # If a parser model is provided, structure the response separately
         self._parse_response_with_parser_model(model_response, run_messages)

-        #
+        # 5. Update the RunOutput with the model response
         self._update_run_response(model_response=model_response, run_response=run_response, run_messages=run_messages)

         if self.store_media:
@@ -806,9 +892,6 @@ class Agent:
             run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
         )

-        # 4. Calculate session metrics
-        self._update_session_metrics(session=session, run_response=run_response)
-
         run_response.status = RunStatus.completed

         # Convert the response to the structured format if needed
@@ -818,22 +901,36 @@ class Agent:
         if run_response.metrics:
             run_response.metrics.stop_timer()

-        #
+        # 6. Execute post-hooks after output is generated but before response is returned
+        if self.post_hooks is not None:
+            self._execute_post_hooks(
+                hooks=self.post_hooks,  # type: ignore
+                run_output=run_response,
+                session=session,
+                user_id=user_id,
+                debug_mode=debug_mode,
+                **kwargs,
+            )
+
+        # 7. Calculate session metrics
+        self._update_session_metrics(session=session, run_response=run_response)
+
+        # 8. Optional: Save output to file if save_response_to_file is set
         self.save_run_response_to_file(
             run_response=run_response, input=run_messages.user_message, session_id=session.session_id, user_id=user_id
         )

-        #
+        # 9. Add the RunOutput to Agent Session
         session.upsert_run(run=run_response)

-        #
+        # 10. Update Agent Memory
         response_iterator = self._make_memories_and_summaries(
             run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
         )
         # Consume the response iterator to ensure the memory is updated before the run is completed
         deque(response_iterator, maxlen=0)

-        #
+        # 11. Save session to memory
         self.save_session(session=session)

         # Log Agent Telemetry
@@ -849,42 +946,103 @@ class Agent:
     def _run_stream(
         self,
         run_response: RunOutput,
-        run_messages: RunMessages,
         session: AgentSession,
+        session_state: Optional[Dict[str, Any]] = None,
         user_id: Optional[str] = None,
+        knowledge_filters: Optional[Dict[str, Any]] = None,
+        add_history_to_context: Optional[bool] = None,
+        add_dependencies_to_context: Optional[bool] = None,
+        add_session_state_to_context: Optional[bool] = None,
+        metadata: Optional[Dict[str, Any]] = None,
+        dependencies: Optional[Dict[str, Any]] = None,
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
         stream_intermediate_steps: bool = False,
         workflow_context: Optional[Dict] = None,
         yield_run_response: bool = False,
+        debug_mode: Optional[bool] = None,
+        **kwargs: Any,
     ) -> Iterator[Union[RunOutputEvent, RunOutput]]:
         """Run the Agent and yield the RunOutput.

         Steps:
-        1.
-        2.
-        3.
-        4.
-        5.
+        1. Execute pre-hooks
+        2. Prepare run messages
+        3. Reason about the task if reasoning is enabled
+        4. Generate a response from the Model (includes running function calls)
+        5. Calculate session metrics
         6. Optional: Save output to file if save_response_to_file is set
         7. Add the RunOutput to the Agent Session
+        8. Update Agent Memory
+        9. Save session to storage
         """
-        log_debug(f"Agent Run Start: {run_response.run_id}", center=True)

         # Register run for cancellation tracking
         register_run(run_response.run_id)  # type: ignore

+        # 1. Execute pre-hooks
+        run_input = cast(RunInput, run_response.input)
+        self.model = cast(Model, self.model)
+        if self.pre_hooks is not None:
+            # Can modify the run input
+            pre_hook_iterator = self._execute_pre_hooks(
+                hooks=self.pre_hooks,  # type: ignore
+                run_response=run_response,
+                run_input=run_input,
+                session=session,
+                user_id=user_id,
+                debug_mode=debug_mode,
+                **kwargs,
+            )
+            for event in pre_hook_iterator:
+                yield event
+
+        self._determine_tools_for_model(
+            model=self.model,
+            run_response=run_response,
+            session=session,
+            session_state=session_state,
+            dependencies=dependencies,
+            user_id=user_id,
+            async_mode=False,
+            knowledge_filters=knowledge_filters,
+        )
+
+        # 2. Prepare run messages
+        run_messages: RunMessages = self._get_run_messages(
+            run_response=run_response,
+            input=run_input.input_content,
+            session=session,
+            session_state=session_state,
+            user_id=user_id,
+            audio=run_input.audios,
+            images=run_input.images,
+            videos=run_input.videos,
+            files=run_input.files,
+            knowledge_filters=knowledge_filters,
+            add_history_to_context=add_history_to_context,
+            dependencies=dependencies,
+            add_dependencies_to_context=add_dependencies_to_context,
+            add_session_state_to_context=add_session_state_to_context,
+            metadata=metadata,
+            **kwargs,
+        )
+        if len(run_messages.messages) == 0:
+            log_error("No messages to be sent to the model.")
+
+        log_debug(f"Agent Run Start: {run_response.run_id}", center=True)
+
         try:
             # Start the Run by yielding a RunStarted event
             if stream_intermediate_steps:
                 yield self._handle_event(create_run_started_event(run_response), run_response, workflow_context)

-            #
+            # 3. Reason about the task if reasoning is enabled
             yield from self._handle_reasoning_stream(run_response=run_response, run_messages=run_messages)

             # Check for cancellation before model processing
             raise_if_cancelled(run_response.run_id)  # type: ignore

-            #
+            # 4. Process model response
             if self.output_model is None:
                 for event in self._handle_model_response_stream(
                     session=session,
@@ -946,20 +1104,21 @@ class Agent:
                 )
                 return

-            # 3. Calculate session metrics
-            self._update_session_metrics(session=session, run_response=run_response)
-
             run_response.status = RunStatus.completed

-            completed_event = self._handle_event(
-                create_run_completed_event(from_run_response=run_response), run_response, workflow_context
-            )
-
             # Set the run duration
             if run_response.metrics:
                 run_response.metrics.stop_timer()

-            #
+            # TODO: For now we don't run post-hooks during streaming
+
+            # 5. Calculate session metrics
+            self._update_session_metrics(session=session, run_response=run_response)
+
+            completed_event = self._handle_event(
+                create_run_completed_event(from_run_response=run_response), run_response, workflow_context
+            )
+            # 6. Optional: Save output to file if save_response_to_file is set
             self.save_run_response_to_file(
                 run_response=run_response,
                 input=run_messages.user_message,
@@ -967,15 +1126,15 @@ class Agent:
                 user_id=user_id,
             )

-            #
+            # 7. Add RunOutput to Agent Session
             session.upsert_run(run=run_response)

-            #
+            # 8. Update Agent Memory
             yield from self._make_memories_and_summaries(
                 run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
             )

-            #
+            # 9. Save session to storage
             self.save_session(session=session)

             if stream_intermediate_steps:
@@ -1092,6 +1251,14 @@ class Agent:
         # Validate input against input_schema if provided
         validated_input = self._validate_input(input)

+        # Normalise hook & guardails
+        if not self._hooks_normalised:
+            if self.pre_hooks:
+                self.pre_hooks = normalize_hooks(self.pre_hooks)
+            if self.post_hooks:
+                self.post_hooks = normalize_hooks(self.post_hooks)
+            self._hooks_normalised = True
+
         session_id, user_id, session_state = self._initialize_session(
             run_id=run_id, session_id=session_id, user_id=user_id, session_state=session_state
         )
@@ -1105,7 +1272,7 @@ class Agent:

         # Create RunInput to capture the original user input
         run_input = RunInput(
-            input_content=
+            input_content=validated_input,
             images=image_artifacts,
             videos=video_artifacts,
             audios=audio_artifacts,
@@ -1191,17 +1358,6 @@ class Agent:
         run_response.metrics = Metrics()
         run_response.metrics.start_timer()

-        self._determine_tools_for_model(
-            model=self.model,
-            run_response=run_response,
-            session=agent_session,
-            session_state=session_state,
-            dependencies=run_dependencies,
-            user_id=user_id,
-            async_mode=False,
-            knowledge_filters=effective_filters,
-        )
-
         # If no retries are set, use the agent's default retries
         retries = retries if retries is not None else self.retries

@@ -1210,48 +1366,46 @@ class Agent:

         for attempt in range(num_attempts):
             try:
-                # Prepare run messages
-                run_messages: RunMessages = self._get_run_messages(
-                    run_response=run_response,
-                    input=validated_input,
-                    session=agent_session,
-                    session_state=session_state,
-                    user_id=user_id,
-                    audio=audio,
-                    images=images,
-                    videos=videos,
-                    files=files,
-                    knowledge_filters=effective_filters,
-                    add_history_to_context=add_history,
-                    dependencies=run_dependencies,
-                    add_dependencies_to_context=add_dependencies,
-                    add_session_state_to_context=add_session_state,
-                    **kwargs,
-                )
-                if len(run_messages.messages) == 0:
-                    log_error("No messages to be sent to the model.")
-
                 if stream:
                     response_iterator = self._run_stream(
                         run_response=run_response,
-                        run_messages=run_messages,
-                        user_id=user_id,
                         session=agent_session,
+                        session_state=session_state,
+                        user_id=user_id,
+                        knowledge_filters=effective_filters,
+                        add_history_to_context=add_history,
+                        add_dependencies_to_context=add_dependencies,
+                        add_session_state_to_context=add_session_state,
+                        metadata=metadata,
+                        dependencies=run_dependencies,
                         response_format=response_format,
                         stream_intermediate_steps=stream_intermediate_steps,
                         workflow_context=workflow_context,
                         yield_run_response=yield_run_response,
+                        debug_mode=debug_mode,
+                        **kwargs,
                     )
                     return response_iterator
                 else:
                     response = self._run(
                         run_response=run_response,
-                        run_messages=run_messages,
-                        user_id=user_id,
                         session=agent_session,
+                        session_state=session_state,
+                        user_id=user_id,
+                        knowledge_filters=effective_filters,
+                        add_history_to_context=add_history,
+                        add_dependencies_to_context=add_dependencies,
+                        add_session_state_to_context=add_session_state,
+                        metadata=metadata,
+                        dependencies=run_dependencies,
                         response_format=response_format,
+                        debug_mode=debug_mode,
+                        **kwargs,
                     )
                     return response
+            except (InputCheckError, OutputCheckError) as e:
+                log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
+                raise e
             except ModelProviderError as e:
                 log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}")
                 if isinstance(e, StopAgentRun):
@@ -1304,14 +1458,9 @@ class Agent:
     async def _arun(
         self,
         run_response: RunOutput,
-        input: Union[str, List, Dict, Message, BaseModel, List[Message]],
         session: AgentSession,
         session_state: Optional[Dict[str, Any]] = None,
         user_id: Optional[str] = None,
-        images: Optional[Sequence[Image]] = None,
-        videos: Optional[Sequence[Video]] = None,
-        audio: Optional[Sequence[Audio]] = None,
-        files: Optional[Sequence[File]] = None,
         knowledge_filters: Optional[Dict[str, Any]] = None,
         add_history_to_context: Optional[bool] = None,
         add_dependencies_to_context: Optional[bool] = None,
@@ -1319,36 +1468,72 @@ class Agent:
         metadata: Optional[Dict[str, Any]] = None,
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
         dependencies: Optional[Dict[str, Any]] = None,
+        debug_mode: Optional[bool] = None,
         **kwargs: Any,
     ) -> RunOutput:
         """Run the Agent and yield the RunOutput.

         Steps:
         1. Resolve dependencies
-        2.
-        3.
-        4.
-        5.
-        6. Update
-        7.
-        8.
-        9. Save
+        2. Execute pre-hooks
+        3. Prepare run messages
+        4. Reason about the task if reasoning is enabled
+        5. Generate a response from the Model (includes running function calls)
+        6. Update the RunOutput with the model response
+        7. Execute post-hooks
+        8. Calculate session metrics
+        9. Save output to file
+        10. Add RunOutput to Agent Session
+        11. Update Agent Memory
+        12. Save session to storage
         """
+        # Register run for cancellation tracking
+        register_run(run_response.run_id)  # type: ignore
+
         # 1. Resolving here for async requirement
         if dependencies is not None:
             await self._aresolve_run_dependencies(dependencies)

-        # 2.
+        # 2. Execute pre-hooks
+        run_input = cast(RunInput, run_response.input)
+        self.model = cast(Model, self.model)
+        if self.pre_hooks is not None:
+            # Can modify the run input
+            pre_hook_iterator = self._aexecute_pre_hooks(
+                hooks=self.pre_hooks,  # type: ignore
+                run_response=run_response,
+                run_input=run_input,
+                session=session,
+                user_id=user_id,
+                debug_mode=debug_mode,
+                **kwargs,
+            )
+            # Consume the async iterator without yielding
+            async for _ in pre_hook_iterator:
+                pass
+
+        self._determine_tools_for_model(
+            model=self.model,
+            run_response=run_response,
+            session=session,
+            session_state=session_state,
+            dependencies=dependencies,
+            user_id=user_id,
+            async_mode=True,
+            knowledge_filters=knowledge_filters,
+        )
+
+        # 3. Prepare run messages
         run_messages: RunMessages = self._get_run_messages(
             run_response=run_response,
-            input=
+            input=run_input.input_content,
             session=session,
             session_state=session_state,
             user_id=user_id,
-            audio=
-            images=images,
-            videos=videos,
-            files=files,
+            audio=run_input.audios,
+            images=run_input.images,
+            videos=run_input.videos,
+            files=run_input.files,
             knowledge_filters=knowledge_filters,
             add_history_to_context=add_history_to_context,
             dependencies=dependencies,
@@ -1362,17 +1547,13 @@ class Agent:

         log_debug(f"Agent Run Start: {run_response.run_id}", center=True)

-        #
-        register_run(run_response.run_id)  # type: ignore
-
-        self.model = cast(Model, self.model)
-        # 3. Reason about the task if reasoning is enabled
+        # 4. Reason about the task if reasoning is enabled
         await self._ahandle_reasoning(run_response=run_response, run_messages=run_messages)

         # Check for cancellation before model call
         raise_if_cancelled(run_response.run_id)  # type: ignore

-        #
+        # 5. Generate a response from the Model (includes running function calls)
         model_response: ModelResponse = await self.model.aresponse(
             messages=run_messages.messages,
             tools=self._tools_for_model,
@@ -1392,7 +1573,7 @@ class Agent:
         # If a parser model is provided, structure the response separately
         await self._aparse_response_with_parser_model(model_response=model_response, run_messages=run_messages)

-        #
+        # 6. Update the RunOutput with the model response
         self._update_run_response(model_response=model_response, run_response=run_response, run_messages=run_messages)

         if self.store_media:
@@ -1406,9 +1587,6 @@ class Agent:
             run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
         )

-        # 6. Calculate session metrics
-        self._update_session_metrics(session=session, run_response=run_response)
-
         run_response.status = RunStatus.completed

         # Convert the response to the structured format if needed
@@ -1418,21 +1596,35 @@ class Agent:
         if run_response.metrics:
             run_response.metrics.stop_timer()

-        #
+        # 7. Execute post-hooks after output is generated but before response is returned
+        if self.post_hooks is not None:
+            await self._aexecute_post_hooks(
+                hooks=self.post_hooks,  # type: ignore
+                run_output=run_response,
+                session=session,
+                user_id=user_id,
+                debug_mode=debug_mode,
+                **kwargs,
+            )
+
+        # 8. Calculate session metrics
+        self._update_session_metrics(session=session, run_response=run_response)
+
+        # 9. Optional: Save output to file if save_response_to_file is set
         self.save_run_response_to_file(
             run_response=run_response, input=run_messages.user_message, session_id=session.session_id, user_id=user_id
         )

-        #
+        # 10. Add RunOutput to Agent Session
         session.upsert_run(run=run_response)

-        #
+        # 11. Update Agent Memory
         async for _ in self._amake_memories_and_summaries(
             run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
         ):
             pass

-        #
+        # 12. Save session to storage
         self.save_session(session=session)

         # Log Agent Telemetry
@@ -1449,52 +1641,79 @@ class Agent:
         self,
         run_response: RunOutput,
         session: AgentSession,
-        input: Union[str, List, Dict, Message, BaseModel, List[Message]],
         session_state: Optional[Dict[str, Any]] = None,
-
-        images: Optional[Sequence[Image]] = None,
-        videos: Optional[Sequence[Video]] = None,
-        files: Optional[Sequence[File]] = None,
+        user_id: Optional[str] = None,
         knowledge_filters: Optional[Dict[str, Any]] = None,
         add_history_to_context: Optional[bool] = None,
         add_dependencies_to_context: Optional[bool] = None,
         add_session_state_to_context: Optional[bool] = None,
         metadata: Optional[Dict[str, Any]] = None,
         dependencies: Optional[Dict[str, Any]] = None,
-        user_id: Optional[str] = None,
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
         stream_intermediate_steps: bool = False,
         workflow_context: Optional[Dict] = None,
         yield_run_response: Optional[bool] = None,
+        debug_mode: Optional[bool] = None,
         **kwargs: Any,
     ) -> AsyncIterator[Union[RunOutputEvent, RunOutput]]:
         """Run the Agent and yield the RunOutput.

         Steps:
         1. Resolve dependencies
-        2.
-        3.
-        4.
-        5.
+        2. Execute pre-hooks
+        3. Prepare run messages
+        4. Reason about the task if reasoning is enabled
+        5. Generate a response from the Model (includes running function calls)
         6. Calculate session metrics
         7. Add RunOutput to Agent Session
-        8.
+        8. Update Agent Memory
+        9. Save session to storage
         """
+
         # 1. Resolving here for async requirement
         if dependencies is not None:
             await self._aresolve_run_dependencies(dependencies=dependencies)

-        # 2.
+        # 2. Execute pre-hooks
+        run_input = cast(RunInput, run_response.input)
+        self.model = cast(Model, self.model)
+
+        if self.pre_hooks is not None:
+            # Can modify the run input
+            pre_hook_iterator = self._aexecute_pre_hooks(
+                hooks=self.pre_hooks,  # type: ignore
+                run_response=run_response,
+                run_input=run_input,
+                session=session,
+                user_id=user_id,
+                debug_mode=debug_mode,
+                **kwargs,
+            )
+            async for event in pre_hook_iterator:
+                yield event
+
+        self._determine_tools_for_model(
+            model=self.model,
+            run_response=run_response,
+            session=session,
+            session_state=session_state,
+            dependencies=dependencies,
+            user_id=user_id,
+            async_mode=True,
+            knowledge_filters=knowledge_filters,
+        )
+
+        # 3. Prepare run messages
         run_messages: RunMessages = self._get_run_messages(
             run_response=run_response,
-            input=
+            input=run_input.input_content,
             session=session,
             session_state=session_state,
             user_id=user_id,
-            audio=
-            images=images,
-            videos=videos,
-            files=files,
+            audio=run_input.audios,
+            images=run_input.images,
+            videos=run_input.videos,
+            files=run_input.files,
             knowledge_filters=knowledge_filters,
             add_history_to_context=add_history_to_context,
             dependencies=dependencies,
@@ -1514,7 +1733,7 @@ class Agent:
         if stream_intermediate_steps:
             yield self._handle_event(create_run_started_event(run_response), run_response, workflow_context)

-        #
+        # 4. Reason about the task if reasoning is enabled
         async for item in self._ahandle_reasoning_stream(run_response=run_response, run_messages=run_messages):
             raise_if_cancelled(run_response.run_id)  # type: ignore
             yield item
@@ -1522,7 +1741,7 @@ class Agent:
         # Check for cancellation before model processing
         raise_if_cancelled(run_response.run_id)  # type: ignore

-        #
+        # 5. Generate a response from the Model
         if self.output_model is None:
             async for event in self._ahandle_model_response_stream(
                 session=session,
@@ -1586,18 +1805,18 @@ class Agent:
                 yield item
             return

-        # 5. Calculate session metrics
-        self._update_session_metrics(session=session, run_response=run_response)
-
         run_response.status = RunStatus.completed

+        # Set the run duration
+        if run_response.metrics:
+            run_response.metrics.stop_timer()
+
         completed_event = self._handle_event(
             create_run_completed_event(from_run_response=run_response), run_response, workflow_context
         )

-        #
-
-        run_response.metrics.stop_timer()
+        # 6. Calculate session metrics
+        self._update_session_metrics(session=session, run_response=run_response)

         # Optional: Save output to file if save_response_to_file is set
         self.save_run_response_to_file(
@@ -1607,16 +1826,16 @@ class Agent:
             user_id=user_id,
         )

-        #
+        # 7. Add RunOutput to Agent Session
         session.upsert_run(run=run_response)

-        #
+        # 8. Update Agent Memory
         async for event in self._amake_memories_and_summaries(
             run_response=run_response, run_messages=run_messages, session=session, user_id=user_id
         ):
             yield event

-        #
+        # 9. Save session to storage
         self.save_session(session=session)

         if stream_intermediate_steps:
@@ -1732,6 +1951,14 @@ class Agent:
         # Validate input against input_schema if provided
         validated_input = self._validate_input(input)

+        # Normalise hook & guardails
+        if not self._hooks_normalised:
+            if self.pre_hooks:
+                self.pre_hooks = normalize_hooks(self.pre_hooks, async_mode=True)
+            if self.post_hooks:
+                self.post_hooks = normalize_hooks(self.post_hooks, async_mode=True)
+            self._hooks_normalised = True
+
         session_id, user_id, session_state = self._initialize_session(
             run_id=run_id, session_id=session_id, user_id=user_id, session_state=session_state
         )
@@ -1745,7 +1972,7 @@ class Agent:

         # Create RunInput to capture the original user input
         run_input = RunInput(
-            input_content=
+            input_content=validated_input,
             images=image_artifacts,
             videos=video_artifacts,
             audios=audio_artifacts,
@@ -1807,6 +2034,9 @@ class Agent:
         else:
             merge_dictionaries(metadata, self.metadata)

+        # If no retries are set, use the agent's default retries
+        retries = retries if retries is not None else self.retries
+
         # Create a new run_response for this attempt
         run_response = RunOutput(
             run_id=run_id,
@@ -1825,20 +2055,6 @@ class Agent:
         run_response.metrics = Metrics()
         run_response.metrics.start_timer()

-        self._determine_tools_for_model(
-            model=self.model,
-            run_response=run_response,
-            session=agent_session,
-            session_state=session_state,
-            dependencies=run_dependencies,
-            user_id=user_id,
-            async_mode=True,
-            knowledge_filters=effective_filters,
-        )
-
-        # If no retries are set, use the agent's default retries
-        retries = retries if retries is not None else self.retries
-
         last_exception = None
         num_attempts = retries + 1

@@ -1848,15 +2064,10 @@ class Agent:
                 if stream:
                     return self._arun_stream(  # type: ignore
                         run_response=run_response,
-                        input=validated_input,
-                        user_id=user_id,
                         session=agent_session,
+                        user_id=user_id,
                         session_state=session_state,
-
-                        images=images,
-                        videos=videos,
-                        files=files,
-                        knowledge_filters=knowledge_filters,
+                        knowledge_filters=effective_filters,
                         add_history_to_context=add_history,
                         add_dependencies_to_context=add_dependencies,
                         add_session_state_to_context=add_session_state,
@@ -1866,19 +2077,15 @@ class Agent:
                         workflow_context=workflow_context,
                         yield_run_response=yield_run_response,
                         dependencies=run_dependencies,
+                        debug_mode=debug_mode,
                         **kwargs,
                     )  # type: ignore[assignment]
                 else:
                     return self._arun(  # type: ignore
                         run_response=run_response,
-                        input=validated_input,
                         user_id=user_id,
                         session=agent_session,
                         session_state=session_state,
-                        audio=audio,
-                        images=images,
-                        videos=videos,
-                        files=files,
                         knowledge_filters=knowledge_filters,
                         add_history_to_context=add_history,
                         add_dependencies_to_context=add_dependencies,
@@ -1889,8 +2096,13 @@ class Agent:
                         workflow_context=workflow_context,
                         yield_run_response=yield_run_response,
                         dependencies=run_dependencies,
+                        debug_mode=debug_mode,
                         **kwargs,
                     )
+
+            except (InputCheckError, OutputCheckError) as e:
+                log_error(f"Validation failed: {str(e)} | Check trigger: {e.check_trigger}")
+                raise e
             except ModelProviderError as e:
                 log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}")
                 if isinstance(e, StopAgentRun):
@@ -1988,6 +2200,7 @@ class Agent:
         knowledge_filters: Optional[Dict[str, Any]] = None,
         dependencies: Optional[Dict[str, Any]] = None,
         debug_mode: Optional[bool] = None,
+        **kwargs,
     ) -> Union[RunOutput, Iterator[RunOutputEvent]]:
         """Continue a previous run.

@@ -2086,6 +2299,7 @@ class Agent:
             run_response=run_response,
             session=agent_session,
             session_state=session_state,
+            dependencies=run_dependencies,
             user_id=user_id,
             async_mode=False,
             knowledge_filters=effective_filters,
@@ -2124,6 +2338,8 @@ class Agent:
                 user_id=user_id,
                 session=agent_session,
                 response_format=response_format,
+                debug_mode=debug_mode,
+                **kwargs,
             )
             return response
         except ModelProviderError as e:
@@ -2170,6 +2386,8 @@ class Agent:
         session: AgentSession,
         user_id: Optional[str] = None,
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
+        debug_mode: Optional[bool] = None,
+        **kwargs,
     ) -> RunOutput:
         """Continue a previous run.

@@ -2218,6 +2436,16 @@ class Agent:
         if run_response.metrics:
             run_response.metrics.stop_timer()

+        if self.post_hooks is not None:
+            self._execute_post_hooks(
+                hooks=self.post_hooks,  # type: ignore
+                run_output=run_response,
+                session=session,
+                user_id=user_id,
+                debug_mode=debug_mode,
+                **kwargs,
+            )
+
         # 4. Save output to file if save_response_to_file is set
         self.save_run_response_to_file(
             run_response=run_response, input=run_messages.user_message, session_id=session.session_id, user_id=user_id
@@ -2373,6 +2601,7 @@ class Agent:
         knowledge_filters: Optional[Dict[str, Any]] = None,
         dependencies: Optional[Dict[str, Any]] = None,
         debug_mode: Optional[bool] = None,
+        **kwargs,
     ) -> Union[RunOutput, AsyncIterator[Union[RunOutputEvent, RunOutput]]]:
         """Continue a previous run.

@@ -2413,10 +2642,6 @@ class Agent:

         run_dependencies = dependencies if dependencies is not None else self.dependencies

-        # Resolve dependencies
-        if run_dependencies is not None:
-            self._resolve_run_dependencies(dependencies=run_dependencies)
-
         effective_filters = knowledge_filters

         # When filters are passed manually
@@ -2508,6 +2733,8 @@ class Agent:
                 session=agent_session,
                 response_format=response_format,
                 dependencies=run_dependencies,
+                debug_mode=debug_mode,
+                **kwargs,
             )
         except ModelProviderError as e:
             log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}")
@@ -2553,6 +2780,8 @@ class Agent:
         user_id: Optional[str] = None,
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
         dependencies: Optional[Dict[str, Any]] = None,
+        debug_mode: Optional[bool] = None,
+        **kwargs,
     ) -> RunOutput:
         """Continue a previous run.

@@ -2604,6 +2833,16 @@ class Agent:
         if run_response.metrics:
             run_response.metrics.stop_timer()

+        if self.post_hooks is not None:
+            await self._aexecute_post_hooks(
+                hooks=self.post_hooks,  # type: ignore
+                run_output=run_response,
+                session=session,
+                user_id=user_id,
+                debug_mode=debug_mode,
+                **kwargs,
+            )
+
         # 4. Save output to file if save_response_to_file is set
         self.save_run_response_to_file(
             run_response=run_response, input=run_messages.user_message, session_id=session.session_id, user_id=user_id
@@ -2649,12 +2888,9 @@ class Agent:
         6. Save output to file if save_response_to_file is set
         7. Save session to storage
         """
-        # Resolving here for async requirement
-        run_dependencies = dependencies if dependencies is not None else self.dependencies
-
         # Resolve dependencies
-        if
-            await self._aresolve_run_dependencies(dependencies=
+        if dependencies is not None:
+            await self._aresolve_run_dependencies(dependencies=dependencies)

         # Start the Run by yielding a RunContinued event
         if stream_intermediate_steps:
@@ -2718,6 +2954,202 @@ class Agent:

         log_debug(f"Agent Run End: {run_response.run_id}", center=True, symbol="*")

+    def _execute_pre_hooks(
+        self,
+        hooks: Optional[List[Callable[..., Any]]],
+        run_response: RunOutput,
+        run_input: RunInput,
+        session: AgentSession,
+        user_id: Optional[str] = None,
+        debug_mode: Optional[bool] = None,
+        **kwargs: Any,
+    ) -> Iterator[RunOutputEvent]:
+        """Execute multiple pre-hook functions in succession."""
+        if hooks is None:
+            return
+
+        # Prepare all possible arguments once
+        all_args = {
+            "run_input": run_input,
+            "agent": self,
+            "session": session,
+            "user_id": user_id,
+            "debug_mode": debug_mode or self.debug_mode,
+        }
+        all_args.update(kwargs)
+
+        for i, hook in enumerate(hooks):
+            yield self._handle_event(
+                run_response=run_response,
+                event=create_pre_hook_started_event(
+                    from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
+                ),
+            )
+            try:
+                # Filter arguments to only include those that the hook accepts
+                filtered_args = filter_hook_args(hook, all_args)
+
+                hook(**filtered_args)
+
+                yield self._handle_event(
+                    run_response=run_response,
+                    event=create_pre_hook_completed_event(
+                        from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
+                    ),
+                )
+
+            except (InputCheckError, OutputCheckError) as e:
+                raise e
+            except Exception as e:
+                log_error(f"Pre-hook #{i + 1} execution failed: {str(e)}")
+                log_exception(e)
+            finally:
+                # Reset global log mode incase an agent in the pre-hook changed it
+                self._set_debug(debug_mode=debug_mode)
+
+        # Update the input on the run_response
+        run_response.input = run_input
+
+    async def _aexecute_pre_hooks(
+        self,
+        hooks: Optional[List[Callable[..., Any]]],
+        run_response: RunOutput,
+        run_input: RunInput,
+        session: AgentSession,
+        user_id: Optional[str] = None,
+        debug_mode: Optional[bool] = None,
+        **kwargs: Any,
+    ) -> AsyncIterator[RunOutputEvent]:
+        """Execute multiple pre-hook functions in succession (async version)."""
+        if hooks is None:
+            return
+
+        # Prepare all possible arguments once
+        all_args = {
+            "run_input": run_input,
+            "agent": self,
+            "session": session,
+            "user_id": user_id,
+            "debug_mode": debug_mode or self.debug_mode,
+        }
+        all_args.update(kwargs)
+
+        for i, hook in enumerate(hooks):
+            yield self._handle_event(
+                run_response=run_response,
+                event=create_pre_hook_started_event(
+                    from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
+                ),
+            )
+            try:
+                # Filter arguments to only include those that the hook accepts
+                filtered_args = filter_hook_args(hook, all_args)
+
+                if asyncio.iscoroutinefunction(hook):
+                    await hook(**filtered_args)
+                else:
+                    # Synchronous function
+                    hook(**filtered_args)
+
+                yield self._handle_event(
+                    run_response=run_response,
+                    event=create_pre_hook_completed_event(
+                        from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
+                    ),
+                )
+
+            except (InputCheckError, OutputCheckError) as e:
+                raise e
+            except Exception as e:
+                log_error(f"Pre-hook #{i + 1} execution failed: {str(e)}")
+                log_exception(e)
+            finally:
+                # Reset global log mode incase an agent in the pre-hook changed it
+                self._set_debug(debug_mode=debug_mode)
+
+        # Update the input on the run_response
+        run_response.input = run_input
+
+    def _execute_post_hooks(
+        self,
+        hooks: Optional[List[Callable[..., Any]]],
+        run_output: RunOutput,
+        session: AgentSession,
+        user_id: Optional[str] = None,
+        debug_mode: Optional[bool] = None,
+        **kwargs: Any,
+    ) -> None:
+        """Execute multiple post-hook functions in succession."""
+        if hooks is None:
+            return
+
+        # Prepare all possible arguments once
+        all_args = {
+            "run_output": run_output,
+            "agent": self,
+            "session": session,
+            "user_id": user_id,
+            "debug_mode": debug_mode or self.debug_mode,
+        }
+        all_args.update(kwargs)
+
+        for i, hook in enumerate(hooks):
+            try:
+                # Filter arguments to only include those that the hook accepts
+                filtered_args = filter_hook_args(hook, all_args)
+
+                hook(**filtered_args)
+            except (InputCheckError, OutputCheckError) as e:
+                raise e
+            except Exception as e:
+                log_error(f"Post-hook #{i + 1} execution failed: {str(e)}")
+                log_exception(e)
+            finally:
+                # Reset global log mode incase an agent in the pre-hook changed it
+                self._set_debug(debug_mode=debug_mode)
+
+    async def _aexecute_post_hooks(
+        self,
+        hooks: Optional[List[Callable[..., Any]]],
+        run_output: RunOutput,
+        session: AgentSession,
+        user_id: Optional[str] = None,
+        debug_mode: Optional[bool] = None,
+        **kwargs: Any,
+    ) -> None:
+        """Execute multiple post-hook functions in succession (async version)."""
+        if hooks is None:
+            return
+
+        # Prepare all possible arguments once
+        all_args = {
+            "run_output": run_output,
+            "agent": self,
+            "session": session,
+            "user_id": user_id,
+            "debug_mode": debug_mode or self.debug_mode,
+        }
+        all_args.update(kwargs)
+
+        for i, hook in enumerate(hooks):
+            try:
+                # Filter arguments to only include those that the hook accepts
+                filtered_args = filter_hook_args(hook, all_args)
+
+                if asyncio.iscoroutinefunction(hook):
+                    await hook(**filtered_args)
+                else:
+                    hook(**filtered_args)
+
+            except (InputCheckError, OutputCheckError) as e:
+                raise e
+            except Exception as e:
+                log_error(f"Post-hook #{i + 1} execution failed: {str(e)}")
+                log_exception(e)
+            finally:
+                # Reset global log mode incase an agent in the pre-hook changed it
+                self._set_debug(debug_mode=debug_mode)
+
     def _handle_agent_run_paused(
         self,
         run_response: RunOutput,
@@ -4812,7 +5244,7 @@ class Agent:

         # 3.2.5 Add information about agentic filters if enabled
         if self.knowledge is not None and self.enable_agentic_knowledge_filters:
-            valid_filters =
+            valid_filters = self.knowledge.get_valid_filters()
            if valid_filters:
                 valid_filters_str = ", ".join(valid_filters)
                 additional_information.append(
@@ -6948,7 +7380,6 @@ class Agent:

         if self.output_schema is not None:
             markdown = False
-            markdown = False

         if stream is None:
             stream = self.stream or False