agno 2.1.10__py3-none-any.whl → 2.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +1594 -1248
- agno/knowledge/knowledge.py +11 -0
- agno/knowledge/reader/pptx_reader.py +101 -0
- agno/knowledge/reader/reader_factory.py +14 -0
- agno/knowledge/types.py +1 -0
- agno/models/anthropic/claude.py +2 -2
- agno/models/base.py +4 -4
- agno/models/ollama/chat.py +7 -2
- agno/os/app.py +1 -1
- agno/os/interfaces/a2a/router.py +2 -2
- agno/os/interfaces/agui/router.py +2 -2
- agno/os/router.py +7 -7
- agno/os/routers/evals/schemas.py +31 -31
- agno/os/routers/health.py +6 -2
- agno/os/routers/knowledge/schemas.py +49 -47
- agno/os/routers/memory/schemas.py +16 -16
- agno/os/routers/metrics/schemas.py +16 -16
- agno/os/routers/session/session.py +382 -7
- agno/os/schema.py +254 -231
- agno/os/utils.py +1 -1
- agno/run/agent.py +54 -1
- agno/run/team.py +48 -0
- agno/run/workflow.py +15 -5
- agno/session/summary.py +45 -13
- agno/session/team.py +90 -5
- agno/team/team.py +1130 -849
- agno/utils/agent.py +372 -0
- agno/utils/events.py +144 -2
- agno/utils/message.py +60 -0
- agno/utils/print_response/agent.py +10 -6
- agno/utils/print_response/team.py +6 -4
- agno/utils/print_response/workflow.py +7 -5
- agno/utils/team.py +9 -8
- agno/workflow/condition.py +17 -9
- agno/workflow/loop.py +18 -10
- agno/workflow/parallel.py +14 -6
- agno/workflow/router.py +16 -8
- agno/workflow/step.py +14 -6
- agno/workflow/steps.py +14 -6
- agno/workflow/workflow.py +331 -123
- {agno-2.1.10.dist-info → agno-2.2.1.dist-info}/METADATA +63 -23
- {agno-2.1.10.dist-info → agno-2.2.1.dist-info}/RECORD +45 -43
- {agno-2.1.10.dist-info → agno-2.2.1.dist-info}/WHEEL +0 -0
- {agno-2.1.10.dist-info → agno-2.2.1.dist-info}/licenses/LICENSE +0 -0
- {agno-2.1.10.dist-info → agno-2.2.1.dist-info}/top_level.txt +0 -0
agno/agent/agent.py
CHANGED
@@ -64,15 +64,28 @@ from agno.run.cancel import (
 )
 from agno.run.messages import RunMessages
 from agno.run.team import TeamRunOutputEvent
-from agno.session import AgentSession, SessionSummaryManager
+from agno.session import AgentSession, SessionSummaryManager, TeamSession, WorkflowSession
 from agno.tools import Toolkit
 from agno.tools.function import Function
+from agno.utils.agent import (
+    await_for_background_tasks,
+    await_for_background_tasks_stream,
+    collect_joint_audios,
+    collect_joint_files,
+    collect_joint_images,
+    collect_joint_videos,
+    scrub_history_messages_from_run_output,
+    scrub_media_from_run_output,
+    scrub_tool_results_from_run_output,
+    wait_for_background_tasks,
+    wait_for_background_tasks_stream,
+)
 from agno.utils.common import is_typed_dict, validate_typed_dict
 from agno.utils.events import (
-    create_memory_update_completed_event,
-    create_memory_update_started_event,
     create_parser_model_response_completed_event,
     create_parser_model_response_started_event,
+    create_post_hook_completed_event,
+    create_post_hook_started_event,
     create_pre_hook_completed_event,
     create_pre_hook_started_event,
     create_reasoning_completed_event,
@@ -80,13 +93,17 @@ from agno.utils.events import (
     create_reasoning_step_event,
     create_run_cancelled_event,
     create_run_completed_event,
+    create_run_content_completed_event,
     create_run_continued_event,
     create_run_error_event,
     create_run_output_content_event,
     create_run_paused_event,
     create_run_started_event,
+    create_session_summary_completed_event,
+    create_session_summary_started_event,
     create_tool_call_completed_event,
     create_tool_call_started_event,
+    handle_event,
 )
 from agno.utils.hooks import filter_hook_args, normalize_hooks
 from agno.utils.knowledge import get_agentic_or_user_search_filters
@@ -100,7 +117,7 @@ from agno.utils.log import (
     set_log_level_to_info,
 )
 from agno.utils.merge_dict import merge_dictionaries
-from agno.utils.message import get_text_from_message
+from agno.utils.message import filter_tool_calls, get_text_from_message
 from agno.utils.print_response.agent import (
     aprint_response,
     aprint_response_stream,
@@ -186,6 +203,8 @@ class Agent:
     add_history_to_context: bool = False
     # Number of historical runs to include in the messages
     num_history_runs: int = 3
+    # Maximum number of tool calls to include from history (None = no limit)
+    max_tool_calls_from_history: Optional[int] = None

     # --- Knowledge ---
     knowledge: Optional[Knowledge] = None
@@ -332,7 +351,9 @@ class Agent:
     # Stream the response from the Agent
     stream: Optional[bool] = None
     # Stream the intermediate steps from the Agent
-
+    stream_events: Optional[bool] = None
+    # [Deprecated] Stream the intermediate steps from the Agent
+    stream_intermediate_steps: Optional[bool] = None

     # Persist the events on the run response
     store_events: bool = False
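Taken together, the hunks above introduce `max_tool_calls_from_history` and rename the streaming flag to `stream_events`, keeping `stream_intermediate_steps` as a deprecated alias. A minimal usage sketch follows; the model class and its id are placeholders rather than part of this diff, and it assumes a streaming `run()` returns an iterator of events, as the overloads later in this diff suggest.

from agno.agent import Agent
from agno.models.anthropic import Claude  # placeholder model; any supported model works

agent = Agent(
    model=Claude(id="claude-sonnet-4-5"),   # assumed id, purely illustrative
    add_history_to_context=True,
    num_history_runs=3,
    max_tool_calls_from_history=5,  # new in 2.2.1: cap tool calls replayed from history
    stream_events=True,             # new in 2.2.1: replaces stream_intermediate_steps
)

for event in agent.run("Summarize our earlier discussion", stream=True):
    print(type(event).__name__)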
@@ -400,6 +421,7 @@ class Agent:
         session_summary_manager: Optional[SessionSummaryManager] = None,
         add_history_to_context: bool = False,
         num_history_runs: int = 3,
+        max_tool_calls_from_history: Optional[int] = None,
         store_media: bool = True,
         store_tool_messages: bool = True,
         store_history_messages: bool = True,
@@ -456,7 +478,8 @@ class Agent:
         use_json_mode: bool = False,
         save_response_to_file: Optional[str] = None,
         stream: Optional[bool] = None,
-
+        stream_events: Optional[bool] = None,
+        stream_intermediate_steps: Optional[bool] = None,
         store_events: bool = False,
         events_to_skip: Optional[List[RunEvent]] = None,
         role: Optional[str] = None,
@@ -500,11 +523,7 @@ class Agent:

         self.add_history_to_context = add_history_to_context
         self.num_history_runs = num_history_runs
-
-        if add_history_to_context and not db:
-            log_warning(
-                "add_history_to_context is True, but no database has been assigned to the agent. History will not be added to the context."
-            )
+        self.max_tool_calls_from_history = max_tool_calls_from_history

         self.store_media = store_media
         self.store_tool_messages = store_tool_messages
@@ -574,7 +593,7 @@ class Agent:
         self.save_response_to_file = save_response_to_file

         self.stream = stream
-        self.
+        self.stream_events = stream_events or stream_intermediate_steps

         self.store_events = store_events
         self.role = role
@@ -607,6 +626,22 @@ class Agent:

         self._hooks_normalised = False

+        # Lazy-initialized shared thread pool executor for background tasks (memory, cultural knowledge, etc.)
+        self._background_executor: Optional[Any] = None
+
+    @property
+    def background_executor(self) -> Any:
+        """Lazy initialization of shared thread pool executor for background tasks.
+
+        Handles both memory creation and cultural knowledge updates concurrently.
+        Initialized only on first use (runtime, not instantiation) and reused across runs.
+        """
+        if self._background_executor is None:
+            from concurrent.futures import ThreadPoolExecutor
+
+            self._background_executor = ThreadPoolExecutor(max_workers=3, thread_name_prefix="agno-bg")
+        return self._background_executor
+
     def set_id(self) -> None:
         if self.id is None:
             self.id = generate_id_from_name(self.name)
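The `background_executor` property above lazily creates one shared ThreadPoolExecutor per agent and reuses it across runs. A self-contained sketch of the same pattern follows; only `ThreadPoolExecutor(max_workers=3, thread_name_prefix="agno-bg")` and the lazy-property shape come from the diff, the class and method names here are illustrative.

from concurrent.futures import Future, ThreadPoolExecutor
from typing import Optional


class BackgroundWork:
    """Illustrative stand-in for the Agent's lazy shared executor."""

    def __init__(self) -> None:
        # Created on first use, not at construction, so idle instances pay no thread cost.
        self._executor: Optional[ThreadPoolExecutor] = None

    @property
    def executor(self) -> ThreadPoolExecutor:
        if self._executor is None:
            self._executor = ThreadPoolExecutor(max_workers=3, thread_name_prefix="agno-bg")
        return self._executor

    def submit_memory_update(self) -> Future:
        # The caller keeps the Future and waits on it before finishing the run.
        return self.executor.submit(lambda: "memories updated")


work = BackgroundWork()
future = work.submit_memory_update()
print(future.result())  # blocks until the background work completes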
@@ -779,11 +814,9 @@ class Agent:

     def _initialize_session(
         self,
-        run_id: str,
         session_id: Optional[str] = None,
         user_id: Optional[str] = None,
-
-    ) -> Tuple[str, Optional[str], Dict[str, Any]]:
+    ) -> Tuple[str, Optional[str]]:
         """Initialize the session for the agent."""

         if session_id is None:
@@ -800,26 +833,23 @@ class Agent:
         if user_id is None or user_id == "":
             user_id = self.user_id

-
-        if session_state is None:
-            session_state = self.session_state or {}
-        else:
-            # If run session_state is provided, merge agent defaults under it
-            # This ensures run state takes precedence over agent defaults
-            if self.session_state:
-                base_state = self.session_state.copy()
-                merge_dictionaries(base_state, session_state)
-                session_state.clear()
-                session_state.update(base_state)
+        return session_id, user_id

-
+    def _initialize_session_state(
+        self,
+        session_state: Dict[str, Any],
+        user_id: Optional[str] = None,
+        session_id: Optional[str] = None,
+        run_id: Optional[str] = None,
+    ) -> Dict[str, Any]:
+        """Initialize the session state for the agent."""
+        if user_id:
             session_state["current_user_id"] = user_id
         if session_id is not None:
             session_state["current_session_id"] = session_id
         if run_id is not None:
             session_state["current_run_id"] = run_id
-
-        return session_id, user_id, session_state  # type: ignore
+        return session_state

     def _run(
         self,
@@ -841,16 +871,18 @@ class Agent:

         Steps:
         1. Execute pre-hooks
-        2.
-        3.
-        4.
-        5.
-        6.
-        7.
-        8.
-        9.
-        10.
-        11.
+        2. Determine tools for model
+        3. Prepare run messages
+        4. Start memory creation in background thread
+        5. Reason about the task if reasoning is enabled
+        6. Generate a response from the Model (includes running function calls)
+        7. Update the RunOutput with the model response
+        8. Store media if enabled
+        9. Convert the response to the structured format if needed
+        10. Execute post-hooks
+        11. Wait for background memory creation and cultural knowledge creation
+        12. Create session summary
+        13. Cleanup and store the run response and session
         """

         # Register run for cancellation tracking
@@ -876,6 +908,7 @@ class Agent:
             # Consume the generator without yielding
             deque(pre_hook_iterator, maxlen=0)

+        # 2. Determine tools for model
         self._determine_tools_for_model(
             model=self.model,
             run_response=run_response,
@@ -887,7 +920,7 @@ class Agent:
             knowledge_filters=knowledge_filters,
         )

-        #
+        # 3. Prepare run messages
         run_messages: RunMessages = self._get_run_messages(
             run_response=run_response,
             input=run_input.input_content,
@@ -911,114 +944,132 @@ class Agent:

         log_debug(f"Agent Run Start: {run_response.run_id}", center=True)

-        #
-
+        # Start memory creation on a separate thread (runs concurrently with the main execution loop)
+        memory_future = None
+        # 4. Start memory creation in background thread if memory manager is enabled and agentic memory is disabled
+        if run_messages.user_message is not None and self.memory_manager is not None and not self.enable_agentic_memory:
+            log_debug("Starting memory creation in background thread.")
+            memory_future = self.background_executor.submit(
+                self._make_memories, run_messages=run_messages, user_id=user_id
+            )

-        #
-
+        # Start cultural knowledge creation on a separate thread (runs concurrently with the main execution loop)
+        cultural_knowledge_future = None
+        if (
+            run_messages.user_message is not None
+            and self.culture_manager is not None
+            and self.update_cultural_knowledge
+        ):
+            log_debug("Starting cultural knowledge creation in background thread.")
+            cultural_knowledge_future = self.background_executor.submit(
+                self._make_cultural_knowledge, run_messages=run_messages
+            )

-
-
-
-
-
-            functions=self._functions_for_model,
-            tool_choice=self.tool_choice,
-            tool_call_limit=self.tool_call_limit,
-            response_format=response_format,
-            run_response=run_response,
-            send_media_to_model=self.send_media_to_model,
-        )
+        try:
+            raise_if_cancelled(run_response.run_id)  # type: ignore
+
+            # 5. Reason about the task
+            self._handle_reasoning(run_response=run_response, run_messages=run_messages)

-
-
+            # Check for cancellation before model call
+            raise_if_cancelled(run_response.run_id)  # type: ignore

-
-
+            # 6. Generate a response from the Model (includes running function calls)
+            self.model = cast(Model, self.model)
+            model_response: ModelResponse = self.model.response(
+                messages=run_messages.messages,
+                tools=self._tools_for_model,
+                functions=self._functions_for_model,
+                tool_choice=self.tool_choice,
+                tool_call_limit=self.tool_call_limit,
+                response_format=response_format,
+                run_response=run_response,
+                send_media_to_model=self.send_media_to_model,
+            )

-
-
+            # Check for cancellation after model call
+            raise_if_cancelled(run_response.run_id)  # type: ignore

-
-
-            model_response=model_response,
-            run_response=run_response,
-            run_messages=run_messages,
-        )
+            # If an output model is provided, generate output using the output model
+            self._generate_response_with_output_model(model_response, run_messages)

-
-        self.
+            # If a parser model is provided, structure the response separately
+            self._parse_response_with_parser_model(model_response, run_messages)

-
-
-
-            run_response=run_response,
-            run_messages=run_messages,
-            session=session,
-            user_id=user_id,
+            # 7. Update the RunOutput with the model response
+            self._update_run_response(
+                model_response=model_response, run_response=run_response, run_messages=run_messages
             )

-
-
+            # We should break out of the run function
+            if any(tool_call.is_paused for tool_call in run_response.tools or []):
+                wait_for_background_tasks(
+                    memory_future=memory_future, cultural_knowledge_future=cultural_knowledge_future
+                )

-
-        if self.post_hooks is not None:
-            self._execute_post_hooks(
-                hooks=self.post_hooks,  # type: ignore
-                run_output=run_response,
-                session_state=session_state,
-                dependencies=dependencies,
-                metadata=metadata,
-                session=session,
-                user_id=user_id,
-                debug_mode=debug_mode,
-                **kwargs,
-            )
-        run_response.status = RunStatus.completed
-        # Stop the timer for the Run duration
-        if run_response.metrics:
-            run_response.metrics.stop_timer()
+                return self._handle_agent_run_paused(run_response=run_response, session=session, user_id=user_id)

-
-
+            # 8. Store media if enabled
+            if self.store_media:
+                self._store_media(run_response, model_response)

-
-
-            run_response=run_response,
-            input=run_messages.user_message,
-            session_id=session.session_id,
-            user_id=user_id,
-        )
+            # 9. Convert the response to the structured format if needed
+            self._convert_response_to_structured_format(run_response)

-
-
+            # 10. Execute post-hooks after output is generated but before response is returned
+            if self.post_hooks is not None:
+                post_hook_iterator = self._execute_post_hooks(
+                    hooks=self.post_hooks,  # type: ignore
+                    run_output=run_response,
+                    session=session,
+                    user_id=user_id,
+                    session_state=session_state,
+                    dependencies=dependencies,
+                    metadata=metadata,
+                    debug_mode=debug_mode,
+                    **kwargs,
+                )
+                deque(post_hook_iterator, maxlen=0)

-
-
-            run_response=run_response,
-            run_messages=run_messages,
-            session=session,
-            user_id=user_id,
-        )
-        # Consume the response iterator to ensure the memory is updated before the run is completed
-        deque(response_iterator, maxlen=0)
+            # Check for cancellation
+            raise_if_cancelled(run_response.run_id)  # type: ignore

-
-
-        session.upsert_run(run=run_response)
+            # 11. Wait for background memory creation and cultural knowledge creation
+            wait_for_background_tasks(memory_future=memory_future, cultural_knowledge_future=cultural_knowledge_future)

-
-
+            # 12. Create session summary
+            if self.session_summary_manager is not None:
+                # Upsert the RunOutput to Agent Session before creating the session summary
+                session.upsert_run(run=run_response)
+                try:
+                    self.session_summary_manager.create_session_summary(session=session)
+                except Exception as e:
+                    log_warning(f"Error in session summary creation: {str(e)}")
+
+            run_response.status = RunStatus.completed

-
-
+            # 13. Cleanup and store the run response and session
+            self._cleanup_and_store(run_response=run_response, session=session, user_id=user_id)

-
+            # Log Agent Telemetry
+            self._log_agent_telemetry(session_id=session.session_id, run_id=run_response.run_id)

-
-        cleanup_run(run_response.run_id)  # type: ignore
+            log_debug(f"Agent Run End: {run_response.run_id}", center=True, symbol="*")

-
+            return run_response
+        except RunCancelledException as e:
+            # Handle run cancellation
+            log_info(f"Run {run_response.run_id} was cancelled")
+            run_response.content = str(e)
+            run_response.status = RunStatus.cancelled
+
+            # Cleanup and store the run response and session
+            self._cleanup_and_store(run_response=run_response, session=session, user_id=user_id)
+
+            return run_response
+        finally:
+            # Always clean up the run tracking
+            cleanup_run(run_response.run_id)  # type: ignore

     def _run_stream(
         self,
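The reworked `_run` above hands memory and cultural-knowledge work to background futures and then blocks on them via `wait_for_background_tasks` before the session is stored. The diff only shows the call sites, so the following is a hypothetical sketch of what such a wait helper has to do; the real helper lives in `agno/utils/agent.py` and its signature may differ.

from concurrent.futures import Future
from typing import Optional


def wait_for_background_tasks_sketch(
    memory_future: Optional[Future] = None,
    cultural_knowledge_future: Optional[Future] = None,
) -> None:
    # Block on whatever background work was started so its writes land
    # before the run response and session are persisted.
    for future in (memory_future, cultural_knowledge_future):
        if future is not None:
            try:
                future.result()
            except Exception as exc:
                # A failed background task should not fail the whole run.
                print(f"Background task failed: {exc}")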
@@ -1033,7 +1084,7 @@ class Agent:
         metadata: Optional[Dict[str, Any]] = None,
         dependencies: Optional[Dict[str, Any]] = None,
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
-
+        stream_events: bool = False,
         yield_run_response: bool = False,
         debug_mode: Optional[bool] = None,
         **kwargs: Any,
@@ -1042,15 +1093,15 @@ class Agent:

         Steps:
         1. Execute pre-hooks
-        2.
-        3.
-        4.
-        5.
-        6.
-        7.
-        8.
-        9. Create
-        10.
+        2. Determine tools for model
+        3. Prepare run messages
+        4. Start memory creation in background thread
+        5. Reason about the task if reasoning is enabled
+        6. Process model response
+        7. Parse response with parser model if provided
+        8. Wait for background memory creation and cultural knowledge creation
+        9. Create session summary
+        10. Cleanup and store the run response and session
         """

         # Register run for cancellation tracking
@@ -1076,6 +1127,7 @@ class Agent:
             for event in pre_hook_iterator:
                 yield event

+        # 2. Determine tools for model
         self._determine_tools_for_model(
             model=self.model,
             run_response=run_response,
@@ -1087,7 +1139,7 @@ class Agent:
             knowledge_filters=knowledge_filters,
         )

-        #
+        # 3. Prepare run messages
         run_messages: RunMessages = self._get_run_messages(
             run_response=run_response,
             input=run_input.input_content,
@@ -1111,25 +1163,55 @@ class Agent:

         log_debug(f"Agent Run Start: {run_response.run_id}", center=True)

+        # Start memory creation on a separate thread (runs concurrently with the main execution loop)
+        memory_future = None
+        # 4. Start memory creation in background thread if memory manager is enabled and agentic memory is disabled
+        if run_messages.user_message is not None and self.memory_manager is not None and not self.enable_agentic_memory:
+            log_debug("Starting memory creation in background thread.")
+            memory_future = self.background_executor.submit(
+                self._make_memories, run_messages=run_messages, user_id=user_id
+            )
+
+        # Start cultural knowledge creation on a separate thread (runs concurrently with the main execution loop)
+        cultural_knowledge_future = None
+        if (
+            run_messages.user_message is not None
+            and self.culture_manager is not None
+            and self.update_cultural_knowledge
+        ):
+            log_debug("Starting cultural knowledge creation in background thread.")
+            cultural_knowledge_future = self.background_executor.submit(
+                self._make_cultural_knowledge, run_messages=run_messages
+            )
+
         try:
             # Start the Run by yielding a RunStarted event
-            if
-            yield
+            if stream_events:
+                yield handle_event(  # type: ignore
+                    create_run_started_event(run_response),
+                    run_response,
+                    events_to_skip=self.events_to_skip,  # type: ignore
+                    store_events=self.store_events,
+                )

-            #
-            yield from self._handle_reasoning_stream(
+            # 5. Reason about the task if reasoning is enabled
+            yield from self._handle_reasoning_stream(
+                run_response=run_response,
+                run_messages=run_messages,
+                stream_events=stream_events,
+            )

             # Check for cancellation before model processing
             raise_if_cancelled(run_response.run_id)  # type: ignore

-            #
+            # 6. Process model response
             if self.output_model is None:
                 for event in self._handle_model_response_stream(
                     session=session,
                     run_response=run_response,
                     run_messages=run_messages,
                     response_format=response_format,
-
+                    stream_events=stream_events,
                 ):
                     raise_if_cancelled(run_response.run_id)  # type: ignore
                     yield event
@@ -1144,11 +1226,11 @@ class Agent:
                     run_response=run_response,
                     run_messages=run_messages,
                     response_format=response_format,
-
+                    stream_events=stream_events,
                 ):
                     raise_if_cancelled(run_response.run_id)  # type: ignore
                     if isinstance(event, RunContentEvent):
-                        if
+                        if stream_events:
                             yield IntermediateRunContentEvent(
                                 content=event.content,
                                 content_type=event.content_type,
@@ -1161,7 +1243,7 @@ class Agent:
                     session=session,
                     run_response=run_response,
                     run_messages=run_messages,
-
+                    stream_events=stream_events,
                 ):
                     raise_if_cancelled(run_response.run_id)  # type: ignore
                     yield event
@@ -1169,26 +1251,40 @@ class Agent:
             # Check for cancellation after model processing
             raise_if_cancelled(run_response.run_id)  # type: ignore

-            #
+            # 7. Parse response with parser model if provided
             yield from self._parse_response_with_parser_model_stream(
-                session=session,
-                run_response=run_response,
-                stream_intermediate_steps=stream_intermediate_steps,
+                session=session, run_response=run_response, stream_events=stream_events
             )

             # We should break out of the run function
             if any(tool_call.is_paused for tool_call in run_response.tools or []):
-                yield from
+                yield from wait_for_background_tasks_stream(
+                    memory_future=memory_future,
+                    cultural_knowledge_future=cultural_knowledge_future,
+                    stream_events=stream_events,
                     run_response=run_response,
-
-
-
+                    events_to_skip=self.events_to_skip,
+                    store_events=self.store_events,
+                )
+
+                # Handle the paused run
+                yield from self._handle_agent_run_paused_stream(
+                    run_response=run_response, session=session, user_id=user_id
                 )
                 return

+            # Yield RunContentCompletedEvent
+            if stream_events:
+                yield handle_event(  # type: ignore
+                    create_run_content_completed_event(from_run_response=run_response),
+                    run_response,
+                    events_to_skip=self.events_to_skip,  # type: ignore
+                    store_events=self.store_events,
+                )
+
             # Execute post-hooks after output is generated but before response is returned
             if self.post_hooks is not None:
-                self._execute_post_hooks(
+                yield from self._execute_post_hooks(
                     hooks=self.post_hooks,  # type: ignore
                     run_output=run_response,
                     session_state=session_state,
@@ -1200,48 +1296,56 @@ class Agent:
                     **kwargs,
                 )

-
-
-
-
-
-
-            # 5. Calculate session metrics
-            self._update_session_metrics(session=session, run_response=run_response)
-
-            # 6. Optional: Save output to file if save_response_to_file is set
-            self.save_run_response_to_file(
+            # 8. Wait for background memory creation and cultural knowledge creation
+            yield from wait_for_background_tasks_stream(
+                memory_future=memory_future,
+                cultural_knowledge_future=cultural_knowledge_future,
+                stream_events=stream_events,
                 run_response=run_response,
-                input=run_messages.user_message,
-                session_id=session.session_id,
-                user_id=user_id,
             )

-            #
-
+            # 9. Create session summary
+            if self.session_summary_manager is not None:
+                # Upsert the RunOutput to Agent Session before creating the session summary
+                session.upsert_run(run=run_response)

-
-
-
-
-
-
-
+                if stream_events:
+                    yield handle_event(  # type: ignore
+                        create_session_summary_started_event(from_run_response=run_response),
+                        run_response,
+                        events_to_skip=self.events_to_skip,  # type: ignore
+                        store_events=self.store_events,
+                    )
+                try:
+                    self.session_summary_manager.create_session_summary(session=session)
+                except Exception as e:
+                    log_warning(f"Error in session summary creation: {str(e)}")
+                if stream_events:
+                    yield handle_event(  # type: ignore
+                        create_session_summary_completed_event(
+                            from_run_response=run_response, session_summary=session.summary
+                        ),
+                        run_response,
+                        events_to_skip=self.events_to_skip,  # type: ignore
+                        store_events=self.store_events,
+                    )

-            #
-            completed_event =
-                create_run_completed_event(from_run_response=run_response),
+            # Create the run completed event
+            completed_event = handle_event(  # type: ignore
+                create_run_completed_event(from_run_response=run_response),
+                run_response,
+                events_to_skip=self.events_to_skip,  # type: ignore
+                store_events=self.store_events,
             )

-            #
-
-            session.upsert_run(run=run_response)
+            # Set the run status to completed
+            run_response.status = RunStatus.completed

-            #
-            self.
+            # 10. Cleanup and store the run response and session
+            self._cleanup_and_store(run_response=run_response, session=session, user_id=user_id)

-            if
-            yield completed_event
+            if stream_events:
+                yield completed_event  # type: ignore

             if yield_run_response:
                 yield run_response
@@ -1258,14 +1362,15 @@ class Agent:
             run_response.content = str(e)

             # Yield the cancellation event
-            yield
+            yield handle_event(  # type: ignore
                 create_run_cancelled_event(from_run_response=run_response, reason=str(e)),
                 run_response,
+                events_to_skip=self.events_to_skip,  # type: ignore
+                store_events=self.store_events,
             )

-            #
-
-            self.save_session(session=session)
+            # Cleanup and store the run response and session
+            self._cleanup_and_store(run_response=run_response, session=session, user_id=user_id)
         finally:
             # Always clean up the run tracking
             cleanup_run(run_response.run_id)  # type: ignore
@@ -1276,6 +1381,7 @@ class Agent:
         input: Union[str, List, Dict, Message, BaseModel, List[Message]],
         *,
         stream: Literal[False] = False,
+        stream_events: Optional[bool] = None,
         stream_intermediate_steps: Optional[bool] = None,
         user_id: Optional[str] = None,
         session_id: Optional[str] = None,
@@ -1301,6 +1407,7 @@ class Agent:
         input: Union[str, List, Dict, Message, BaseModel, List[Message]],
         *,
         stream: Literal[True] = True,
+        stream_events: Optional[bool] = None,
         stream_intermediate_steps: Optional[bool] = None,
         user_id: Optional[str] = None,
         session_id: Optional[str] = None,
@@ -1326,6 +1433,7 @@ class Agent:
         input: Union[str, List, Dict, Message, BaseModel, List[Message]],
         *,
         stream: Optional[bool] = None,
+        stream_events: Optional[bool] = None,
         stream_intermediate_steps: Optional[bool] = None,
         user_id: Optional[str] = None,
         session_id: Optional[str] = None,
@@ -1351,6 +1459,11 @@ class Agent:
                 "`run` method is not supported with an async database. Please use `arun` method instead."
             )

+        if (add_history_to_context or self.add_history_to_context) and not self.db and not self.team_id:
+            log_warning(
+                "add_history_to_context is True, but no database has been assigned to the agent. History will not be added to the context."
+            )
+
         # Create a run_id for this specific run
         run_id = str(uuid4())

@@ -1365,12 +1478,7 @@ class Agent:
             self.post_hooks = normalize_hooks(self.post_hooks)
             self._hooks_normalised = True

-        session_id, user_id
-            run_id=run_id,
-            session_id=session_id,
-            user_id=user_id,
-            session_state=session_state,
-        )
+        session_id, user_id = self._initialize_session(session_id=session_id, user_id=user_id)

         # Initialize the Agent
         self.initialize_agent(debug_mode=debug_mode)
@@ -1392,15 +1500,19 @@ class Agent:
         agent_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
         self._update_metadata(session=agent_session)

+        # Initialize session state
+        session_state = self._initialize_session_state(
+            session_state=session_state or {}, user_id=user_id, session_id=session_id, run_id=run_id
+        )
         # Update session state from DB
         session_state = self._load_session_state(session=agent_session, session_state=session_state)
-
         # Determine runtime dependencies
         run_dependencies = dependencies if dependencies is not None else self.dependencies

         # Resolve dependencies
         if run_dependencies is not None:
             self._resolve_run_dependencies(dependencies=run_dependencies)
+
         add_dependencies = (
             add_dependencies_to_context if add_dependencies_to_context is not None else self.add_dependencies_to_context
         )
@@ -1422,17 +1534,18 @@ class Agent:
         if stream is None:
             stream = False if self.stream is None else self.stream

-
-
-            False if self.stream_intermediate_steps is None else self.stream_intermediate_steps
-        )
+        # Considering both stream_events and stream_intermediate_steps (deprecated)
+        stream_events = stream_events or stream_intermediate_steps

-        # Can't
+        # Can't stream events if streaming is disabled
         if stream is False:
-
+            stream_events = False
+
+        if stream_events is None:
+            stream_events = False if self.stream_events is None else self.stream_events

         self.stream = self.stream or stream
-        self.
+        self.stream_events = self.stream_events or stream_events

         # Prepare arguments for the model
         response_format = self._get_response_format() if self.parser_model is None else None
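In short, the hunk above folds the deprecated flag into `stream_events` and forces it off when streaming itself is off. The same resolution, restated as a tiny standalone snippet:

# Simplified restatement of the flag resolution above.
stream = False                    # value passed to run(), or the Agent default
stream_events = None              # new flag
stream_intermediate_steps = True  # deprecated flag, still accepted

stream_events = stream_events or stream_intermediate_steps  # deprecated alias folds into the new flag
if stream is False:
    stream_events = False  # events only stream when the response itself streams
print(stream_events)  # -> False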
@@ -1484,7 +1597,7 @@ class Agent:
                 metadata=metadata,
                 dependencies=run_dependencies,
                 response_format=response_format,
-
+                stream_events=stream_events,
                 yield_run_response=yield_run_response,
                 debug_mode=debug_mode,
                 **kwargs,
@@ -1523,17 +1636,6 @@ class Agent:
                     import time

                     time.sleep(delay)
-        except RunCancelledException as e:
-            # Handle run cancellation
-            log_info(f"Run {run_response.run_id} was cancelled")
-            run_response.content = str(e)
-            run_response.status = RunStatus.cancelled
-
-            # Add the RunOutput to Agent Session even when cancelled
-            agent_session.upsert_run(run=run_response)
-            self.save_session(session=agent_session)
-
-            return run_response
         except KeyboardInterrupt:
             run_response.content = "Operation cancelled by user"
             run_response.status = RunStatus.cancelled
@@ -1578,7 +1680,7 @@ class Agent:
         debug_mode: Optional[bool] = None,
         **kwargs: Any,
     ) -> RunOutput:
-        """Run the Agent and
+        """Run the Agent and return the RunOutput.

         Steps:
         1. Read or create session
@@ -1587,14 +1689,16 @@ class Agent:
         4. Execute pre-hooks
         5. Determine tools for model
         6. Prepare run messages
-        7.
-        8.
-        9.
-        10.
-        11.
-        12.
-        13.
-        14.
+        7. Start memory creation in background task
+        8. Reason about the task if reasoning is enabled
+        9. Generate a response from the Model (includes running function calls)
+        10. Update the RunOutput with the model response
+        11. Convert response to structured format
+        12. Store media if enabled
+        13. Execute post-hooks
+        14. Wait for background memory creation
+        15. Create session summary
+        16. Cleanup and store (scrub, stop timer, save to file, add to session, calculate metrics, save session)
         """
         log_debug(f"Agent Run Start: {run_response.run_id}", center=True)

@@ -1602,13 +1706,15 @@ class Agent:
         register_run(run_response.run_id)  # type: ignore

         # 1. Read or create session. Reads from the database if provided.
-
-            agent_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)
-        else:
-            agent_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
+        agent_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)

         # 2. Update metadata and session state
         self._update_metadata(session=agent_session)
+        # Initialize session state
+        session_state = self._initialize_session_state(
+            session_state=session_state or {}, user_id=user_id, session_id=session_id, run_id=run_response.run_id
+        )
+        # Update session state from DB
         if session_state is not None:
             session_state = self._load_session_state(session=agent_session, session_state=session_state)

@@ -1672,12 +1778,37 @@ class Agent:
         if len(run_messages.messages) == 0:
             log_error("No messages to be sent to the model.")

+        # 7. Start memory creation as a background task (runs concurrently with the main execution)
+        memory_task = None
+        if run_messages.user_message is not None and self.memory_manager is not None and not self.enable_agentic_memory:
+            import asyncio
+
+            log_debug("Starting memory creation in background task.")
+            memory_task = asyncio.create_task(self._amake_memories(run_messages=run_messages, user_id=user_id))
+
+        # Start cultural knowledge creation on a separate thread (runs concurrently with the main execution loop)
+        cultural_knowledge_task = None
+        if (
+            run_messages.user_message is not None
+            and self.culture_manager is not None
+            and self.update_cultural_knowledge
+        ):
+            import asyncio
+
+            log_debug("Starting cultural knowledge creation in background thread.")
+            cultural_knowledge_task = asyncio.create_task(self._acreate_cultural_knowledge(run_messages=run_messages))
+
         try:
-            #
+            # Check for cancellation before model call
+            raise_if_cancelled(run_response.run_id)  # type: ignore
+
+            # 8. Reason about the task if reasoning is enabled
             await self._ahandle_reasoning(run_response=run_response, run_messages=run_messages)
+
+            # Check for cancellation before model call
             raise_if_cancelled(run_response.run_id)  # type: ignore

-            #
+            # 9. Generate a response from the Model (includes running function calls)
             model_response: ModelResponse = await self.model.aresponse(
                 messages=run_messages.messages,
                 tools=self._tools_for_model,
@@ -1687,43 +1818,42 @@ class Agent:
                 response_format=response_format,
                 send_media_to_model=self.send_media_to_model,
             )
+
+            # Check for cancellation after model call
             raise_if_cancelled(run_response.run_id)  # type: ignore

             # If an output model is provided, generate output using the output model
             await self._agenerate_response_with_output_model(model_response=model_response, run_messages=run_messages)
+
             # If a parser model is provided, structure the response separately
             await self._aparse_response_with_parser_model(model_response=model_response, run_messages=run_messages)

-            #
+            # 10. Update the RunOutput with the model response
             self._update_run_response(
                 model_response=model_response,
                 run_response=run_response,
                 run_messages=run_messages,
             )

-            #
-            if self.store_media:
-                self._store_media(run_response, model_response)
-
-            # Break out of the run function if a tool call is paused
+            # We should break out of the run function
             if any(tool_call.is_paused for tool_call in run_response.tools or []):
-
-
-
-
-                user_id=user_id
+                await await_for_background_tasks(
+                    memory_task=memory_task, cultural_knowledge_task=cultural_knowledge_task
+                )
+                return await self._ahandle_agent_run_paused(
+                    run_response=run_response, session=agent_session, user_id=user_id
                 )
-            raise_if_cancelled(run_response.run_id)  # type: ignore
-
-            # 10. Calculate session metrics
-            self._update_session_metrics(session=agent_session, run_response=run_response)

-            # Convert the response to the structured format if needed
+            # 11. Convert the response to the structured format if needed
             self._convert_response_to_structured_format(run_response)

-            #
+            # 12. Store media if enabled
+            if self.store_media:
+                self._store_media(run_response, model_response)
+
+            # 13. Execute post-hooks (after output is generated but before response is returned)
             if self.post_hooks is not None:
-
+                async for _ in self._aexecute_post_hooks(
                     hooks=self.post_hooks,  # type: ignore
                     run_output=run_response,
                     session_state=session_state,
@@ -1733,44 +1863,28 @@ class Agent:
                     user_id=user_id,
                     debug_mode=debug_mode,
                     **kwargs,
-            )
+                ):
+                    pass

-            #
-            run_response.
+            # Check for cancellation
+            raise_if_cancelled(run_response.run_id)  # type: ignore

-            #
-
-            run_response.metrics.stop_timer()
+            # 14. Wait for background memory creation
+            await await_for_background_tasks(memory_task=memory_task, cultural_knowledge_task=cultural_knowledge_task)

-            #
-            self.
-
-
-
-
-
+            # 15. Create session summary
+            if self.session_summary_manager is not None:
+                # Upsert the RunOutput to Agent Session before creating the session summary
+                agent_session.upsert_run(run=run_response)
+                try:
+                    await self.session_summary_manager.acreate_session_summary(session=agent_session)
+                except Exception as e:
+                    log_warning(f"Error in session summary creation: {str(e)}")

-
-            agent_session.upsert_run(run=run_response)
+            run_response.status = RunStatus.completed

-            #
-
-            run_response=run_response,
-            run_messages=run_messages,
-            session=agent_session,
-            user_id=user_id,
-            ):
-            pass
-
-            # 13. Scrub the stored run based on storage flags
-            if self._scrub_run_output_for_storage(run_response):
-                agent_session.upsert_run(run=run_response)
-
-            # 14. Save session to storage
-            if self._has_async_db():
-                await self.asave_session(session=agent_session)
-            else:
-                self.save_session(session=agent_session)
+            # 16. Cleanup and store the run response and session
+            await self._acleanup_and_store(run_response=run_response, session=agent_session, user_id=user_id)

             # Log Agent Telemetry
             await self._alog_agent_telemetry(session_id=agent_session.session_id, run_id=run_response.run_id)
@@ -1785,16 +1899,30 @@ class Agent:
             run_response.content = str(e)
             run_response.status = RunStatus.cancelled

-            #
-
-            if self._has_async_db():
-                await self.asave_session(session=agent_session)
-            else:
-                self.save_session(session=agent_session)
+            # Cleanup and store the run response and session
+            await self._acleanup_and_store(run_response=run_response, session=agent_session, user_id=user_id)

             return run_response

         finally:
+            # Cancel the memory task if it's still running
+            if memory_task is not None and not memory_task.done():
+                import asyncio
+
+                memory_task.cancel()
+                try:
+                    await memory_task
+                except asyncio.CancelledError:
+                    pass
+            # Cancel the cultural knowledge task if it's still running
+            if cultural_knowledge_task is not None and not cultural_knowledge_task.done():
+                import asyncio
+
+                cultural_knowledge_task.cancel()
+                try:
+                    await cultural_knowledge_task
+                except asyncio.CancelledError:
+                    pass
             # Always clean up the run tracking
             cleanup_run(run_response.run_id)  # type: ignore

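The async run path above starts memory work with `asyncio.create_task` and cancels it in the `finally` block if it is still pending. A standalone sketch of that lifecycle, with illustrative names:

import asyncio


async def run_with_background_task() -> str:
    async def make_memories() -> None:
        await asyncio.sleep(60)  # stands in for a slow memory-manager call

    memory_task = asyncio.create_task(make_memories())
    try:
        return "model response"  # main work would happen here
    finally:
        if not memory_task.done():
            memory_task.cancel()
            try:
                await memory_task
            except asyncio.CancelledError:
                pass  # cancellation is expected during cleanup


print(asyncio.run(run_with_background_task()))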
@@ -1811,7 +1939,7 @@ class Agent:
         add_session_state_to_context: Optional[bool] = None,
         metadata: Optional[Dict[str, Any]] = None,
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
-
+        stream_events: bool = False,
         yield_run_response: Optional[bool] = None,
         debug_mode: Optional[bool] = None,
         **kwargs: Any,
@@ -1825,28 +1953,35 @@ class Agent:
         4. Execute pre-hooks
         5. Determine tools for model
         6. Prepare run messages
-        7.
-        8.
-        9.
-        10.
-        11.
-        12. Create
-        13.
+        7. Start memory creation in background task
+        8. Reason about the task if reasoning is enabled
+        9. Generate a response from the Model (includes running function calls)
+        10. Parse response with parser model if provided
+        11. Wait for background memory creation
+        12. Create session summary
+        13. Cleanup and store (scrub, stop timer, save to file, add to session, calculate metrics, save session)
         """
         log_debug(f"Agent Run Start: {run_response.run_id}", center=True)

         # Start the Run by yielding a RunStarted event
-        if
-        yield
+        if stream_events:
+            yield handle_event(  # type: ignore
+                create_run_started_event(run_response),
+                run_response,
+                events_to_skip=self.events_to_skip,  # type: ignore
+                store_events=self.store_events,
+            )

         # 1. Read or create session. Reads from the database if provided.
-
-            agent_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)
-        else:
-            agent_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
+        agent_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)

         # 2. Update metadata and session state
         self._update_metadata(session=agent_session)
+        # Initialize session state
+        session_state = self._initialize_session_state(
+            session_state=session_state or {}, user_id=user_id, session_id=session_id, run_id=run_response.run_id
+        )
+        # Update session state from DB
         if session_state is not None:
             session_state = self._load_session_state(session=agent_session, session_state=session_state)

@@ -1864,6 +1999,9 @@ class Agent:
             run_response=run_response,
             run_input=run_input,
             session=agent_session,
+            session_state=session_state,
+            dependencies=dependencies,
+            metadata=metadata,
             user_id=user_id,
             debug_mode=debug_mode,
             **kwargs,
@@ -1906,24 +2044,49 @@ class Agent:
         if len(run_messages.messages) == 0:
             log_error("No messages to be sent to the model.")

+        # 7. Start memory creation as a background task (runs concurrently with the main execution)
+        memory_task = None
+        if run_messages.user_message is not None and self.memory_manager is not None and not self.enable_agentic_memory:
+            import asyncio
+
+            log_debug("Starting memory creation in background task.")
+            memory_task = asyncio.create_task(self._amake_memories(run_messages=run_messages, user_id=user_id))
+
+        # Start cultural knowledge creation on a separate thread (runs concurrently with the main execution loop)
+        cultural_knowledge_task = None
+        if (
+            run_messages.user_message is not None
+            and self.culture_manager is not None
+            and self.update_cultural_knowledge
+        ):
+            import asyncio
+
+            log_debug("Starting cultural knowledge creation in background task.")
+            cultural_knowledge_task = asyncio.create_task(self._acreate_cultural_knowledge(run_messages=run_messages))
+
         # Register run for cancellation tracking
         register_run(run_response.run_id)  # type: ignore

         try:
-            #
-            async for item in self._ahandle_reasoning_stream(
+            # 8. Reason about the task if reasoning is enabled
+            async for item in self._ahandle_reasoning_stream(
+                run_response=run_response,
+                run_messages=run_messages,
+                stream_events=stream_events,
+            ):
                 raise_if_cancelled(run_response.run_id)  # type: ignore
                 yield item
+
             raise_if_cancelled(run_response.run_id)  # type: ignore

-            #
+            # 9. Generate a response from the Model
             if self.output_model is None:
                 async for event in self._ahandle_model_response_stream(
                     session=agent_session,
                     run_response=run_response,
                     run_messages=run_messages,
                     response_format=response_format,
-
+                    stream_events=stream_events,
                 ):
                     raise_if_cancelled(run_response.run_id)  # type: ignore
                     yield event
@@ -1938,11 +2101,11 @@ class Agent:
                     run_response=run_response,
                     run_messages=run_messages,
                     response_format=response_format,
-
+                    stream_events=stream_events,
                 ):
                     raise_if_cancelled(run_response.run_id)  # type: ignore
                     if isinstance(event, RunContentEvent):
-                        if
+                        if stream_events:
                             yield IntermediateRunContentEvent(
                                 content=event.content,
                                 content_type=event.content_type,
@@ -1955,7 +2118,7 @@ class Agent:
                     session=agent_session,
                     run_response=run_response,
                     run_messages=run_messages,
-
+                    stream_events=stream_events,
                 ):
                     raise_if_cancelled(run_response.run_id)  # type: ignore
                     yield event
@@ -1963,28 +2126,39 @@ class Agent:
             # Check for cancellation after model processing
             raise_if_cancelled(run_response.run_id)  # type: ignore

-            #
+            # 10. Parse response with parser model if provided
             async for event in self._aparse_response_with_parser_model_stream(
-                session=agent_session,
-                run_response=run_response,
-                stream_intermediate_steps=stream_intermediate_steps,
+                session=agent_session, run_response=run_response, stream_events=stream_events
             ):
                 yield event

+            if stream_events:
+                yield handle_event(  # type: ignore
+                    create_run_content_completed_event(from_run_response=run_response),
+                    run_response,
+                    events_to_skip=self.events_to_skip,  # type: ignore
+                    store_events=self.store_events,
+                )
+
             # Break out of the run function if a tool call is paused
             if any(tool_call.is_paused for tool_call in run_response.tools or []):
-                for item in
+                async for item in await_for_background_tasks_stream(
+                    memory_task=memory_task,
+                    cultural_knowledge_task=cultural_knowledge_task,
+                    stream_events=stream_events,
                     run_response=run_response,
-
-
-
+                ):
+                    yield item
+
+                async for item in self._ahandle_agent_run_paused_stream(
+                    run_response=run_response, session=agent_session, user_id=user_id
                 ):
                     yield item
                 return

             # Execute post-hooks (after output is generated but before response is returned)
             if self.post_hooks is not None:
-
+                async for event in self._aexecute_post_hooks(
                     hooks=self.post_hooks,  # type: ignore
                     run_output=run_response,
                     session_state=session_state,
@@ -1994,55 +2168,62 @@ class Agent:
                     user_id=user_id,
                     debug_mode=debug_mode,
                     **kwargs,
-            )
-
-            # Set the run status to completed
-            run_response.status = RunStatus.completed
-
-            # Set the run duration
-            if run_response.metrics:
-                run_response.metrics.stop_timer()
-
-            # 9. Calculate session metrics
-            self._update_session_metrics(session=agent_session, run_response=run_response)
+                ):
+                    yield event

-            #
-
+            # 11. Wait for background memory creation
+            async for item in await_for_background_tasks_stream(
+                memory_task=memory_task,
+                cultural_knowledge_task=cultural_knowledge_task,
+                stream_events=stream_events,
                 run_response=run_response,
-
-
-
-
+                events_to_skip=self.events_to_skip,
+                store_events=self.store_events,
+            ):
+                yield item

-            #
-
+            # 12. Create session summary
+            if self.session_summary_manager is not None:
+                # Upsert the RunOutput to Agent Session before creating the session summary
+                agent_session.upsert_run(run=run_response)

-
-
-
-
-
-
-
-
+                if stream_events:
+                    yield handle_event(  # type: ignore
+                        create_session_summary_started_event(from_run_response=run_response),
+                        run_response,
+                        events_to_skip=self.events_to_skip,  # type: ignore
+                        store_events=self.store_events,
+                    )
+                try:
+                    await self.session_summary_manager.acreate_session_summary(session=agent_session)
+                except Exception as e:
+                    log_warning(f"Error in session summary creation: {str(e)}")
|
|
2201
|
+
if stream_events:
|
|
2202
|
+
yield handle_event( # type: ignore
|
|
2203
|
+
create_session_summary_completed_event(
|
|
2204
|
+
from_run_response=run_response, session_summary=agent_session.summary
|
|
2205
|
+
),
|
|
2206
|
+
run_response,
|
|
2207
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
2208
|
+
store_events=self.store_events,
|
|
2209
|
+
)
|
|
2028
2210
|
|
|
2029
|
-
#
|
|
2030
|
-
completed_event =
|
|
2031
|
-
create_run_completed_event(from_run_response=run_response),
|
|
2211
|
+
# Create the run completed event
|
|
2212
|
+
completed_event = handle_event(
|
|
2213
|
+
create_run_completed_event(from_run_response=run_response),
|
|
2214
|
+
run_response,
|
|
2215
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
2216
|
+
store_events=self.store_events,
|
|
2032
2217
|
)
|
|
2033
2218
|
|
|
2034
|
-
#
|
|
2035
|
-
|
|
2036
|
-
agent_session.upsert_run(run=run_response)
|
|
2219
|
+
# Set the run status to completed
|
|
2220
|
+
run_response.status = RunStatus.completed
|
|
2037
2221
|
|
|
2038
|
-
#
|
|
2039
|
-
|
|
2040
|
-
await self.asave_session(session=agent_session)
|
|
2041
|
-
else:
|
|
2042
|
-
self.save_session(session=agent_session)
|
|
2222
|
+
# 13. Cleanup and store the run response and session
|
|
2223
|
+
await self._acleanup_and_store(run_response=run_response, session=agent_session, user_id=user_id)
|
|
2043
2224
|
|
|
2044
|
-
if
|
|
2045
|
-
yield completed_event
|
|
2225
|
+
if stream_events:
|
|
2226
|
+
yield completed_event # type: ignore
|
|
2046
2227
|
|
|
2047
2228
|
if yield_run_response:
|
|
2048
2229
|
yield run_response
|
|
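The hunk above now upserts the run into the session, emits a session-summary "started" event, creates the summary inside a `try`/`except` that only logs a warning on failure, and then emits a "completed" event. A minimal fail-soft sketch of that shape, using plain dictionaries as stand-in events (the `create_summary` helper and event names are illustrative only, not agno APIs):

```python
import logging
from typing import Any, Dict, Iterator, List

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)


def create_summary(messages: List[str]) -> str:
    # Stand-in for the model-backed summary; may raise in real use.
    return f"{len(messages)} message(s) exchanged"


def summarize_with_events(messages: List[str], stream_events: bool) -> Iterator[Dict[str, Any]]:
    # Mirror the diff: optionally announce the step, never let a summary failure kill the run.
    if stream_events:
        yield {"event": "SessionSummaryStarted"}
    summary = None
    try:
        summary = create_summary(messages)
    except Exception as e:
        log.warning("Error in session summary creation: %s", e)
    if stream_events:
        yield {"event": "SessionSummaryCompleted", "summary": summary}


if __name__ == "__main__":
    for event in summarize_with_events(["hi", "hello"], stream_events=True):
        print(event)
```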
@@ -2059,18 +2240,31 @@ class Agent:
|
|
|
2059
2240
|
run_response.content = str(e)
|
|
2060
2241
|
|
|
2061
2242
|
# Yield the cancellation event
|
|
2062
|
-
yield
|
|
2243
|
+
yield handle_event( # type: ignore
|
|
2063
2244
|
create_run_cancelled_event(from_run_response=run_response, reason=str(e)),
|
|
2064
2245
|
run_response,
|
|
2246
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
2247
|
+
store_events=self.store_events,
|
|
2065
2248
|
)
|
|
2066
2249
|
|
|
2067
|
-
#
|
|
2068
|
-
|
|
2069
|
-
if self._has_async_db():
|
|
2070
|
-
await self.asave_session(session=agent_session)
|
|
2071
|
-
else:
|
|
2072
|
-
self.save_session(session=agent_session)
|
|
2250
|
+
# Cleanup and store the run response and session
|
|
2251
|
+
await self._acleanup_and_store(run_response=run_response, session=agent_session, user_id=user_id)
|
|
2073
2252
|
finally:
|
|
2253
|
+
# Cancel the memory task if it's still running
|
|
2254
|
+
if memory_task is not None and not memory_task.done():
|
|
2255
|
+
memory_task.cancel()
|
|
2256
|
+
try:
|
|
2257
|
+
await memory_task
|
|
2258
|
+
except asyncio.CancelledError:
|
|
2259
|
+
pass
|
|
2260
|
+
|
|
2261
|
+
if cultural_knowledge_task is not None and not cultural_knowledge_task.done():
|
|
2262
|
+
cultural_knowledge_task.cancel()
|
|
2263
|
+
try:
|
|
2264
|
+
await cultural_knowledge_task
|
|
2265
|
+
except asyncio.CancelledError:
|
|
2266
|
+
pass
|
|
2267
|
+
|
|
2074
2268
|
# Always clean up the run tracking
|
|
2075
2269
|
cleanup_run(run_response.run_id) # type: ignore
|
|
2076
2270
|
|
|
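The hunk above launches memory and cultural-knowledge creation as background `asyncio` tasks that run alongside the main loop, and its `finally` block cancels any task that is still pending and awaits it while swallowing the expected `CancelledError`. A self-contained sketch of that pattern, assuming a generic `slow_side_work` coroutine in place of the real background jobs:

```python
import asyncio


async def slow_side_work(name: str) -> str:
    # Stand-in for memory / cultural-knowledge creation running in the background.
    await asyncio.sleep(10)
    return f"{name} done"


async def run_with_background_task() -> str:
    # Launch the side work so it runs concurrently with the main execution loop.
    side_task = asyncio.create_task(slow_side_work("memory"))
    try:
        # Main work finishes (or fails) independently of the side task.
        await asyncio.sleep(0.1)
        return "main result"
    finally:
        # Cancel the task if it is still running, then await it and swallow
        # the expected CancelledError so cleanup itself never raises.
        if not side_task.done():
            side_task.cancel()
            try:
                await side_task
            except asyncio.CancelledError:
                pass


if __name__ == "__main__":
    print(asyncio.run(run_with_background_task()))
```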
@@ -2087,6 +2281,7 @@ class Agent:
         images: Optional[Sequence[Image]] = None,
         videos: Optional[Sequence[Video]] = None,
         files: Optional[Sequence[File]] = None,
+        stream_events: Optional[bool] = None,
         stream_intermediate_steps: Optional[bool] = None,
         retries: Optional[int] = None,
         knowledge_filters: Optional[Dict[str, Any]] = None,
@@ -2111,6 +2306,7 @@ class Agent:
         images: Optional[Sequence[Image]] = None,
         videos: Optional[Sequence[Video]] = None,
         files: Optional[Sequence[File]] = None,
+        stream_events: Optional[bool] = None,
         stream_intermediate_steps: Optional[bool] = None,
         retries: Optional[int] = None,
         knowledge_filters: Optional[Dict[str, Any]] = None,
@@ -2136,6 +2332,7 @@ class Agent:
         images: Optional[Sequence[Image]] = None,
         videos: Optional[Sequence[Video]] = None,
         files: Optional[Sequence[File]] = None,
+        stream_events: Optional[bool] = None,
         stream_intermediate_steps: Optional[bool] = None,
         retries: Optional[int] = None,
         knowledge_filters: Optional[Dict[str, Any]] = None,
@@ -2150,6 +2347,11 @@ class Agent:
     ) -> Union[RunOutput, AsyncIterator[RunOutputEvent]]:
         """Async Run the Agent and return the response."""

+        if (add_history_to_context or self.add_history_to_context) and not self.db and not self.team_id:
+            log_warning(
+                "add_history_to_context is True, but no database has been assigned to the agent. History will not be added to the context."
+            )
+
         # Create a run_id for this specific run
         run_id = str(uuid4())

@@ -2165,12 +2367,7 @@ class Agent:
|
|
|
2165
2367
|
self._hooks_normalised = True
|
|
2166
2368
|
|
|
2167
2369
|
# Initialize session
|
|
2168
|
-
session_id, user_id
|
|
2169
|
-
run_id=run_id,
|
|
2170
|
-
session_id=session_id,
|
|
2171
|
-
user_id=user_id,
|
|
2172
|
-
session_state=session_state,
|
|
2173
|
-
)
|
|
2370
|
+
session_id, user_id = self._initialize_session(session_id=session_id, user_id=user_id)
|
|
2174
2371
|
|
|
2175
2372
|
# Initialize the Agent
|
|
2176
2373
|
self.initialize_agent(debug_mode=debug_mode)
|
|
@@ -2204,17 +2401,18 @@ class Agent:
|
|
|
2204
2401
|
if stream is None:
|
|
2205
2402
|
stream = False if self.stream is None else self.stream
|
|
2206
2403
|
|
|
2207
|
-
|
|
2208
|
-
|
|
2209
|
-
False if self.stream_intermediate_steps is None else self.stream_intermediate_steps
|
|
2210
|
-
)
|
|
2404
|
+
# Considering both stream_events and stream_intermediate_steps (deprecated)
|
|
2405
|
+
stream_events = stream_events or stream_intermediate_steps
|
|
2211
2406
|
|
|
2212
|
-
# Can't
|
|
2407
|
+
# Can't stream events if streaming is disabled
|
|
2213
2408
|
if stream is False:
|
|
2214
|
-
|
|
2409
|
+
stream_events = False
|
|
2410
|
+
|
|
2411
|
+
if stream_events is None:
|
|
2412
|
+
stream_events = False if self.stream_events is None else self.stream_events
|
|
2215
2413
|
|
|
2216
2414
|
self.stream = self.stream or stream
|
|
2217
|
-
self.
|
|
2415
|
+
self.stream_events = self.stream_events or stream_events
|
|
2218
2416
|
|
|
2219
2417
|
# Prepare arguments for the model
|
|
2220
2418
|
response_format = self._get_response_format() if self.parser_model is None else None
|
|
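The hunk above replaces the deprecated `stream_intermediate_steps` handling with a `stream_events` resolution chain: the deprecated flag still counts, events are forced off when streaming is disabled, and otherwise the configured default applies. A rough sketch of that precedence written as a standalone helper (the function name and plain arguments instead of agent attributes are assumptions for illustration):

```python
from typing import Optional, Tuple


def resolve_stream_events(
    stream: Optional[bool],
    stream_events: Optional[bool],
    stream_intermediate_steps: Optional[bool],
    default_stream: Optional[bool] = None,
    default_stream_events: Optional[bool] = None,
) -> Tuple[bool, bool]:
    # Fall back to the configured default when stream was not passed explicitly.
    if stream is None:
        stream = False if default_stream is None else default_stream

    # The deprecated flag still counts: either switch turns event streaming on.
    stream_events = stream_events or stream_intermediate_steps

    # Events can only be streamed when streaming itself is enabled.
    if stream is False:
        stream_events = False

    # Otherwise fall back to the configured default for stream_events.
    if stream_events is None:
        stream_events = False if default_stream_events is None else default_stream_events

    return stream, stream_events


if __name__ == "__main__":
    # The deprecated flag alone still enables events while streaming is on.
    print(resolve_stream_events(stream=True, stream_events=None, stream_intermediate_steps=True))
    # With streaming off, events are forced off regardless of the flags.
    print(resolve_stream_events(stream=False, stream_events=True, stream_intermediate_steps=None))
```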
@@ -2264,7 +2462,7 @@ class Agent:
|
|
|
2264
2462
|
run_response=run_response,
|
|
2265
2463
|
user_id=user_id,
|
|
2266
2464
|
response_format=response_format,
|
|
2267
|
-
|
|
2465
|
+
stream_events=stream_events,
|
|
2268
2466
|
yield_run_response=yield_run_response,
|
|
2269
2467
|
dependencies=run_dependencies,
|
|
2270
2468
|
session_id=session_id,
|
|
@@ -2346,6 +2544,7 @@ class Agent:
         run_id: Optional[str] = None,
         updated_tools: Optional[List[ToolExecution]] = None,
         stream: Literal[False] = False,
+        stream_events: Optional[bool] = None,
         stream_intermediate_steps: Optional[bool] = None,
         user_id: Optional[str] = None,
         session_id: Optional[str] = None,
@@ -2364,6 +2563,7 @@ class Agent:
         run_id: Optional[str] = None,
         updated_tools: Optional[List[ToolExecution]] = None,
         stream: Literal[True] = True,
+        stream_events: Optional[bool] = False,
         stream_intermediate_steps: Optional[bool] = None,
         user_id: Optional[str] = None,
         session_id: Optional[str] = None,
@@ -2381,6 +2581,7 @@ class Agent:
         run_id: Optional[str] = None,
         updated_tools: Optional[List[ToolExecution]] = None,
         stream: Optional[bool] = None,
+        stream_events: Optional[bool] = False,
         stream_intermediate_steps: Optional[bool] = None,
         user_id: Optional[str] = None,
         session_id: Optional[str] = None,
@@ -2398,13 +2599,15 @@ class Agent:
|
|
|
2398
2599
|
run_id: The run id to continue. Alternative to passing run_response.
|
|
2399
2600
|
updated_tools: The updated tools to use for the run. Required to be used with `run_id`.
|
|
2400
2601
|
stream: Whether to stream the response.
|
|
2401
|
-
|
|
2602
|
+
stream_events: Whether to stream all events.
|
|
2402
2603
|
user_id: The user id to continue the run for.
|
|
2403
2604
|
session_id: The session id to continue the run for.
|
|
2404
2605
|
retries: The number of retries to continue the run for.
|
|
2405
2606
|
knowledge_filters: The knowledge filters to use for the run.
|
|
2406
2607
|
dependencies: The dependencies to use for the run.
|
|
2608
|
+
metadata: The metadata to use for the run.
|
|
2407
2609
|
debug_mode: Whether to enable debug mode.
|
|
2610
|
+
(deprecated) stream_intermediate_steps: Whether to stream all steps.
|
|
2408
2611
|
"""
|
|
2409
2612
|
if run_response is None and run_id is None:
|
|
2410
2613
|
raise ValueError("Either run_response or run_id must be provided.")
|
|
@@ -2417,10 +2620,9 @@ class Agent:
|
|
|
2417
2620
|
|
|
2418
2621
|
session_id = run_response.session_id if run_response else session_id
|
|
2419
2622
|
|
|
2420
|
-
session_id, user_id
|
|
2421
|
-
run_id=run_id, # type: ignore
|
|
2623
|
+
session_id, user_id = self._initialize_session(
|
|
2422
2624
|
session_id=session_id,
|
|
2423
|
-
user_id=user_id,
|
|
2625
|
+
user_id=user_id,
|
|
2424
2626
|
)
|
|
2425
2627
|
# Initialize the Agent
|
|
2426
2628
|
self.initialize_agent(debug_mode=debug_mode)
|
|
@@ -2429,6 +2631,10 @@ class Agent:
         agent_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
         self._update_metadata(session=agent_session)

+        # Initialize session state
+        session_state = self._initialize_session_state(
+            session_state={}, user_id=user_id, session_id=session_id, run_id=run_id
+        )
         # Update session state from DB
         session_state = self._load_session_state(session=agent_session, session_state=session_state)

@@ -2458,17 +2664,22 @@ class Agent:
|
|
|
2458
2664
|
if stream is None:
|
|
2459
2665
|
stream = False if self.stream is None else self.stream
|
|
2460
2666
|
|
|
2461
|
-
|
|
2462
|
-
|
|
2463
|
-
False if self.stream_intermediate_steps is None else self.stream_intermediate_steps
|
|
2464
|
-
)
|
|
2667
|
+
# Considering both stream_events and stream_intermediate_steps (deprecated)
|
|
2668
|
+
stream_events = stream_events or stream_intermediate_steps
|
|
2465
2669
|
|
|
2466
|
-
# Can't
|
|
2670
|
+
# Can't stream events if streaming is disabled
|
|
2467
2671
|
if stream is False:
|
|
2468
|
-
|
|
2672
|
+
stream_events = False
|
|
2673
|
+
|
|
2674
|
+
if stream_events is None:
|
|
2675
|
+
stream_events = False if self.stream_events is None else self.stream_events
|
|
2676
|
+
|
|
2677
|
+
# Can't stream events if streaming is disabled
|
|
2678
|
+
if stream is False:
|
|
2679
|
+
stream_events = False
|
|
2469
2680
|
|
|
2470
2681
|
self.stream = self.stream or stream
|
|
2471
|
-
self.
|
|
2682
|
+
self.stream_events = self.stream_events or stream_events
|
|
2472
2683
|
|
|
2473
2684
|
# Run can be continued from previous run response or from passed run_response context
|
|
2474
2685
|
if run_response is not None:
|
|
@@ -2530,7 +2741,7 @@ class Agent:
|
|
|
2530
2741
|
dependencies=run_dependencies,
|
|
2531
2742
|
metadata=metadata,
|
|
2532
2743
|
response_format=response_format,
|
|
2533
|
-
|
|
2744
|
+
stream_events=stream_events,
|
|
2534
2745
|
debug_mode=debug_mode,
|
|
2535
2746
|
**kwargs,
|
|
2536
2747
|
)
|
|
@@ -2604,95 +2815,105 @@ class Agent:
|
|
|
2604
2815
|
Steps:
|
|
2605
2816
|
1. Handle any updated tools
|
|
2606
2817
|
2. Generate a response from the Model
|
|
2607
|
-
3. Update
|
|
2608
|
-
4.
|
|
2609
|
-
5.
|
|
2610
|
-
6.
|
|
2611
|
-
7.
|
|
2818
|
+
3. Update the RunOutput with the model response
|
|
2819
|
+
4. Convert response to structured format
|
|
2820
|
+
5. Store media if enabled
|
|
2821
|
+
6. Execute post-hooks
|
|
2822
|
+
7. Create session summary
|
|
2823
|
+
8. Cleanup and store (scrub, stop timer, save to file, add to session, calculate metrics, save session)
|
|
2612
2824
|
"""
|
|
2825
|
+
# Register run for cancellation tracking
|
|
2826
|
+
register_run(run_response.run_id) # type: ignore
|
|
2827
|
+
|
|
2613
2828
|
self.model = cast(Model, self.model)
|
|
2614
2829
|
|
|
2615
2830
|
# 1. Handle the updated tools
|
|
2616
2831
|
self._handle_tool_call_updates(run_response=run_response, run_messages=run_messages)
|
|
2617
2832
|
|
|
2618
|
-
|
|
2619
|
-
|
|
2620
|
-
|
|
2621
|
-
messages=run_messages.messages,
|
|
2622
|
-
response_format=response_format,
|
|
2623
|
-
tools=self._tools_for_model,
|
|
2624
|
-
functions=self._functions_for_model,
|
|
2625
|
-
tool_choice=self.tool_choice,
|
|
2626
|
-
tool_call_limit=self.tool_call_limit,
|
|
2627
|
-
)
|
|
2833
|
+
try:
|
|
2834
|
+
# Check for cancellation before model call
|
|
2835
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
2628
2836
|
|
|
2629
|
-
|
|
2630
|
-
|
|
2631
|
-
|
|
2632
|
-
|
|
2633
|
-
|
|
2837
|
+
# 2. Generate a response from the Model (includes running function calls)
|
|
2838
|
+
self.model = cast(Model, self.model)
|
|
2839
|
+
model_response: ModelResponse = self.model.response(
|
|
2840
|
+
messages=run_messages.messages,
|
|
2841
|
+
response_format=response_format,
|
|
2842
|
+
tools=self._tools_for_model,
|
|
2843
|
+
functions=self._functions_for_model,
|
|
2844
|
+
tool_choice=self.tool_choice,
|
|
2845
|
+
tool_call_limit=self.tool_call_limit,
|
|
2846
|
+
)
|
|
2634
2847
|
|
|
2635
|
-
|
|
2636
|
-
|
|
2637
|
-
|
|
2638
|
-
|
|
2639
|
-
|
|
2640
|
-
|
|
2641
|
-
user_id=user_id,
|
|
2848
|
+
# Check for cancellation after model processing
|
|
2849
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
2850
|
+
|
|
2851
|
+
# 3. Update the RunOutput with the model response
|
|
2852
|
+
self._update_run_response(
|
|
2853
|
+
model_response=model_response, run_response=run_response, run_messages=run_messages
|
|
2642
2854
|
)
|
|
2643
2855
|
|
|
2644
|
-
|
|
2645
|
-
|
|
2856
|
+
# We should break out of the run function
|
|
2857
|
+
if any(tool_call.is_paused for tool_call in run_response.tools or []):
|
|
2858
|
+
return self._handle_agent_run_paused(run_response=run_response, session=session, user_id=user_id)
|
|
2646
2859
|
|
|
2647
|
-
|
|
2648
|
-
|
|
2860
|
+
# 4. Convert the response to the structured format if needed
|
|
2861
|
+
self._convert_response_to_structured_format(run_response)
|
|
2649
2862
|
|
|
2650
|
-
|
|
2651
|
-
self.
|
|
2652
|
-
|
|
2653
|
-
run_output=run_response,
|
|
2654
|
-
session_state=session_state,
|
|
2655
|
-
dependencies=dependencies,
|
|
2656
|
-
metadata=metadata,
|
|
2657
|
-
session=session,
|
|
2658
|
-
user_id=user_id,
|
|
2659
|
-
debug_mode=debug_mode,
|
|
2660
|
-
**kwargs,
|
|
2661
|
-
)
|
|
2863
|
+
# 5. Store media if enabled
|
|
2864
|
+
if self.store_media:
|
|
2865
|
+
self._store_media(run_response, model_response)
|
|
2662
2866
|
|
|
2663
|
-
|
|
2664
|
-
|
|
2665
|
-
|
|
2666
|
-
|
|
2867
|
+
# 6. Execute post-hooks
|
|
2868
|
+
if self.post_hooks is not None:
|
|
2869
|
+
post_hook_iterator = self._execute_post_hooks(
|
|
2870
|
+
hooks=self.post_hooks, # type: ignore
|
|
2871
|
+
run_output=run_response,
|
|
2872
|
+
session=session,
|
|
2873
|
+
user_id=user_id,
|
|
2874
|
+
session_state=session_state,
|
|
2875
|
+
dependencies=dependencies,
|
|
2876
|
+
metadata=metadata,
|
|
2877
|
+
debug_mode=debug_mode,
|
|
2878
|
+
**kwargs,
|
|
2879
|
+
)
|
|
2880
|
+
deque(post_hook_iterator, maxlen=0)
|
|
2881
|
+
# Check for cancellation
|
|
2882
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
2667
2883
|
|
|
2668
|
-
|
|
2669
|
-
|
|
2670
|
-
|
|
2671
|
-
|
|
2672
|
-
session_id=session.session_id,
|
|
2673
|
-
user_id=user_id,
|
|
2674
|
-
)
|
|
2884
|
+
# 7. Create session summary
|
|
2885
|
+
if self.session_summary_manager is not None:
|
|
2886
|
+
# Upsert the RunOutput to Agent Session before creating the session summary
|
|
2887
|
+
session.upsert_run(run=run_response)
|
|
2675
2888
|
|
|
2676
|
-
|
|
2677
|
-
|
|
2889
|
+
try:
|
|
2890
|
+
self.session_summary_manager.create_session_summary(session=session)
|
|
2891
|
+
except Exception as e:
|
|
2892
|
+
log_warning(f"Error in session summary creation: {str(e)}")
|
|
2678
2893
|
|
|
2679
|
-
|
|
2680
|
-
|
|
2681
|
-
run_response=run_response,
|
|
2682
|
-
run_messages=run_messages,
|
|
2683
|
-
session=session,
|
|
2684
|
-
user_id=user_id,
|
|
2685
|
-
)
|
|
2686
|
-
# Consume the response iterator to ensure the memory is updated before the run is completed
|
|
2687
|
-
deque(response_iterator, maxlen=0)
|
|
2894
|
+
# Set the run status to completed
|
|
2895
|
+
run_response.status = RunStatus.completed
|
|
2688
2896
|
|
|
2689
|
-
|
|
2690
|
-
|
|
2897
|
+
# 8. Cleanup and store the run response and session
|
|
2898
|
+
self._cleanup_and_store(run_response=run_response, session=session, user_id=user_id)
|
|
2691
2899
|
|
|
2692
|
-
|
|
2693
|
-
|
|
2900
|
+
# Log Agent Telemetry
|
|
2901
|
+
self._log_agent_telemetry(session_id=session.session_id, run_id=run_response.run_id)
|
|
2694
2902
|
|
|
2695
|
-
|
|
2903
|
+
return run_response
|
|
2904
|
+
except RunCancelledException as e:
|
|
2905
|
+
# Handle run cancellation during async streaming
|
|
2906
|
+
log_info(f"Run {run_response.run_id} was cancelled")
|
|
2907
|
+
run_response.status = RunStatus.cancelled
|
|
2908
|
+
run_response.content = str(e)
|
|
2909
|
+
|
|
2910
|
+
# Cleanup and store the run response and session
|
|
2911
|
+
self._cleanup_and_store(run_response=run_response, session=session, user_id=user_id)
|
|
2912
|
+
|
|
2913
|
+
return run_response
|
|
2914
|
+
finally:
|
|
2915
|
+
# Always clean up the run tracking
|
|
2916
|
+
cleanup_run(run_response.run_id) # type: ignore
|
|
2696
2917
|
|
|
2697
2918
|
def _continue_run_stream(
|
|
2698
2919
|
self,
|
|
@@ -2703,7 +2924,7 @@ class Agent:
|
|
|
2703
2924
|
metadata: Optional[Dict[str, Any]] = None,
|
|
2704
2925
|
user_id: Optional[str] = None,
|
|
2705
2926
|
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
|
|
2706
|
-
|
|
2927
|
+
stream_events: bool = False,
|
|
2707
2928
|
dependencies: Optional[Dict[str, Any]] = None,
|
|
2708
2929
|
debug_mode: Optional[bool] = None,
|
|
2709
2930
|
**kwargs,
|
|
@@ -2711,100 +2932,149 @@ class Agent:
|
|
|
2711
2932
|
"""Continue a previous run.
|
|
2712
2933
|
|
|
2713
2934
|
Steps:
|
|
2714
|
-
1.
|
|
2715
|
-
2.
|
|
2716
|
-
3.
|
|
2717
|
-
4.
|
|
2718
|
-
5.
|
|
2719
|
-
6.
|
|
2720
|
-
7. Create the run completed event
|
|
2721
|
-
8. Save session to storage
|
|
2935
|
+
1. Resolve dependencies
|
|
2936
|
+
2. Handle any updated tools
|
|
2937
|
+
3. Process model response
|
|
2938
|
+
4. Execute post-hooks
|
|
2939
|
+
5. Create session summary
|
|
2940
|
+
6. Cleanup and store the run response and session
|
|
2722
2941
|
"""
|
|
2723
2942
|
|
|
2943
|
+
# 1. Resolve dependencies
|
|
2724
2944
|
if dependencies is not None:
|
|
2725
2945
|
self._resolve_run_dependencies(dependencies=dependencies)
|
|
2726
2946
|
|
|
2727
2947
|
# Start the Run by yielding a RunContinued event
|
|
2728
|
-
if
|
|
2729
|
-
yield
|
|
2730
|
-
|
|
2731
|
-
|
|
2732
|
-
|
|
2948
|
+
if stream_events:
|
|
2949
|
+
yield handle_event( # type: ignore
|
|
2950
|
+
create_run_continued_event(run_response),
|
|
2951
|
+
run_response,
|
|
2952
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
2953
|
+
store_events=self.store_events,
|
|
2954
|
+
)
|
|
2733
2955
|
|
|
2734
|
-
# 2.
|
|
2735
|
-
|
|
2736
|
-
|
|
2737
|
-
|
|
2738
|
-
run_messages=run_messages,
|
|
2739
|
-
response_format=response_format,
|
|
2740
|
-
stream_intermediate_steps=stream_intermediate_steps,
|
|
2741
|
-
):
|
|
2742
|
-
yield event
|
|
2956
|
+
# 2. Handle the updated tools
|
|
2957
|
+
yield from self._handle_tool_call_updates_stream(
|
|
2958
|
+
run_response=run_response, run_messages=run_messages, stream_events=stream_events
|
|
2959
|
+
)
|
|
2743
2960
|
|
|
2744
|
-
|
|
2745
|
-
|
|
2746
|
-
|
|
2961
|
+
try:
|
|
2962
|
+
# 3. Process model response
|
|
2963
|
+
for event in self._handle_model_response_stream(
|
|
2964
|
+
session=session,
|
|
2747
2965
|
run_response=run_response,
|
|
2748
2966
|
run_messages=run_messages,
|
|
2749
|
-
|
|
2750
|
-
|
|
2751
|
-
)
|
|
2752
|
-
|
|
2967
|
+
response_format=response_format,
|
|
2968
|
+
stream_events=stream_events,
|
|
2969
|
+
):
|
|
2970
|
+
yield event
|
|
2753
2971
|
|
|
2754
|
-
|
|
2755
|
-
self.
|
|
2756
|
-
|
|
2757
|
-
run_output=run_response,
|
|
2758
|
-
session_state=session_state,
|
|
2759
|
-
dependencies=dependencies,
|
|
2760
|
-
metadata=metadata,
|
|
2761
|
-
session=session,
|
|
2762
|
-
user_id=user_id,
|
|
2763
|
-
debug_mode=debug_mode,
|
|
2764
|
-
**kwargs,
|
|
2972
|
+
# Parse response with parser model if provided
|
|
2973
|
+
yield from self._parse_response_with_parser_model_stream(
|
|
2974
|
+
session=session, run_response=run_response, stream_events=stream_events
|
|
2765
2975
|
)
|
|
2766
2976
|
|
|
2767
|
-
|
|
2977
|
+
# Yield RunContentCompletedEvent
|
|
2978
|
+
if stream_events:
|
|
2979
|
+
yield handle_event( # type: ignore
|
|
2980
|
+
create_run_content_completed_event(from_run_response=run_response),
|
|
2981
|
+
run_response,
|
|
2982
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
2983
|
+
store_events=self.store_events,
|
|
2984
|
+
)
|
|
2768
2985
|
|
|
2769
|
-
|
|
2770
|
-
|
|
2771
|
-
|
|
2986
|
+
# We should break out of the run function
|
|
2987
|
+
if any(tool_call.is_paused for tool_call in run_response.tools or []):
|
|
2988
|
+
yield from self._handle_agent_run_paused_stream(
|
|
2989
|
+
run_response=run_response, session=session, user_id=user_id
|
|
2990
|
+
)
|
|
2991
|
+
return
|
|
2772
2992
|
|
|
2773
|
-
|
|
2774
|
-
|
|
2993
|
+
# Execute post-hooks
|
|
2994
|
+
if self.post_hooks is not None:
|
|
2995
|
+
yield from self._execute_post_hooks(
|
|
2996
|
+
hooks=self.post_hooks, # type: ignore
|
|
2997
|
+
run_output=run_response,
|
|
2998
|
+
session=session,
|
|
2999
|
+
session_state=session_state,
|
|
3000
|
+
dependencies=dependencies,
|
|
3001
|
+
metadata=metadata,
|
|
3002
|
+
user_id=user_id,
|
|
3003
|
+
debug_mode=debug_mode,
|
|
3004
|
+
**kwargs,
|
|
3005
|
+
)
|
|
2775
3006
|
|
|
2776
|
-
|
|
2777
|
-
|
|
2778
|
-
run_response=run_response,
|
|
2779
|
-
input=run_messages.user_message,
|
|
2780
|
-
session_id=session.session_id,
|
|
2781
|
-
user_id=user_id,
|
|
2782
|
-
)
|
|
3007
|
+
# Check for cancellation before model call
|
|
3008
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
2783
3009
|
|
|
2784
|
-
|
|
2785
|
-
|
|
3010
|
+
# 4. Create session summary
|
|
3011
|
+
if self.session_summary_manager is not None:
|
|
3012
|
+
# Upsert the RunOutput to Agent Session before creating the session summary
|
|
3013
|
+
session.upsert_run(run=run_response)
|
|
2786
3014
|
|
|
2787
|
-
|
|
2788
|
-
|
|
2789
|
-
|
|
2790
|
-
|
|
2791
|
-
|
|
2792
|
-
|
|
2793
|
-
|
|
3015
|
+
if stream_events:
|
|
3016
|
+
yield handle_event( # type: ignore
|
|
3017
|
+
create_session_summary_started_event(from_run_response=run_response),
|
|
3018
|
+
run_response,
|
|
3019
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3020
|
+
store_events=self.store_events,
|
|
3021
|
+
)
|
|
3022
|
+
try:
|
|
3023
|
+
self.session_summary_manager.create_session_summary(session=session)
|
|
3024
|
+
except Exception as e:
|
|
3025
|
+
log_warning(f"Error in session summary creation: {str(e)}")
|
|
2794
3026
|
|
|
2795
|
-
|
|
2796
|
-
|
|
3027
|
+
if stream_events:
|
|
3028
|
+
yield handle_event( # type: ignore
|
|
3029
|
+
create_session_summary_completed_event(
|
|
3030
|
+
from_run_response=run_response, session_summary=session.summary
|
|
3031
|
+
),
|
|
3032
|
+
run_response,
|
|
3033
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3034
|
+
store_events=self.store_events,
|
|
3035
|
+
)
|
|
2797
3036
|
|
|
2798
|
-
|
|
2799
|
-
|
|
3037
|
+
# Create the run completed event
|
|
3038
|
+
completed_event = handle_event(
|
|
3039
|
+
create_run_completed_event(run_response),
|
|
3040
|
+
run_response,
|
|
3041
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3042
|
+
store_events=self.store_events,
|
|
3043
|
+
)
|
|
3044
|
+
|
|
3045
|
+
# Set the run status to completed
|
|
3046
|
+
run_response.status = RunStatus.completed
|
|
3047
|
+
|
|
3048
|
+
# 5. Cleanup and store the run response and session
|
|
3049
|
+
self._cleanup_and_store(run_response=run_response, session=session, user_id=user_id)
|
|
3050
|
+
|
|
3051
|
+
if stream_events:
|
|
3052
|
+
yield completed_event # type: ignore
|
|
3053
|
+
|
|
3054
|
+
# Log Agent Telemetry
|
|
3055
|
+
self._log_agent_telemetry(session_id=session.session_id, run_id=run_response.run_id)
|
|
2800
3056
|
|
|
2801
|
-
|
|
2802
|
-
yield completed_event
|
|
3057
|
+
log_debug(f"Agent Run End: {run_response.run_id}", center=True, symbol="*")
|
|
2803
3058
|
|
|
2804
|
-
|
|
2805
|
-
|
|
3059
|
+
except RunCancelledException as e:
|
|
3060
|
+
# Handle run cancellation during async streaming
|
|
3061
|
+
log_info(f"Run {run_response.run_id} was cancelled during streaming")
|
|
3062
|
+
run_response.status = RunStatus.cancelled
|
|
3063
|
+
run_response.content = str(e)
|
|
2806
3064
|
|
|
2807
|
-
|
|
3065
|
+
# Yield the cancellation event
|
|
3066
|
+
yield handle_event( # type: ignore
|
|
3067
|
+
create_run_cancelled_event(from_run_response=run_response, reason=str(e)),
|
|
3068
|
+
run_response,
|
|
3069
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3070
|
+
store_events=self.store_events,
|
|
3071
|
+
)
|
|
3072
|
+
|
|
3073
|
+
# Cleanup and store the run response and session
|
|
3074
|
+
self._cleanup_and_store(run_response=run_response, session=session, user_id=user_id)
|
|
3075
|
+
finally:
|
|
3076
|
+
# Always clean up the run tracking
|
|
3077
|
+
cleanup_run(run_response.run_id) # type: ignore
|
|
2808
3078
|
|
|
2809
3079
|
@overload
|
|
2810
3080
|
async def acontinue_run(
|
|
@@ -2812,6 +3082,7 @@ class Agent:
         run_response: Optional[RunOutput] = None,
         *,
         stream: Literal[False] = False,
+        stream_events: Optional[bool] = None,
         stream_intermediate_steps: Optional[bool] = None,
         run_id: Optional[str] = None,
         updated_tools: Optional[List[ToolExecution]] = None,
@@ -2830,6 +3101,7 @@ class Agent:
         run_response: Optional[RunOutput] = None,
         *,
         stream: Literal[True] = True,
+        stream_events: Optional[bool] = None,
         stream_intermediate_steps: Optional[bool] = None,
         run_id: Optional[str] = None,
         updated_tools: Optional[List[ToolExecution]] = None,
@@ -2849,6 +3121,7 @@ class Agent:
         run_id: Optional[str] = None,
         updated_tools: Optional[List[ToolExecution]] = None,
         stream: Optional[bool] = None,
+        stream_events: Optional[bool] = None,
         stream_intermediate_steps: Optional[bool] = None,
         user_id: Optional[str] = None,
         session_id: Optional[str] = None,
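These overloads follow the existing pattern in the file: a `Literal[False]` stream maps to a concrete result, `Literal[True]` maps to an iterator of events, and the new `stream_events` keyword is threaded through every signature. A generic sketch of how `Literal` overloads steer the return type (the `run` function and its behaviour here are invented for the example, not agno's API):

```python
from typing import Iterator, Literal, Optional, Union, overload


@overload
def run(message: str, *, stream: Literal[False] = False, stream_events: Optional[bool] = None) -> str: ...
@overload
def run(message: str, *, stream: Literal[True], stream_events: Optional[bool] = None) -> Iterator[str]: ...
def run(
    message: str, *, stream: bool = False, stream_events: Optional[bool] = None
) -> Union[str, Iterator[str]]:
    # Single implementation behind the overloads; type checkers pick the
    # overload that matches the literal value passed for `stream`.
    if stream:
        return iter(message.split())
    return message


if __name__ == "__main__":
    print(run("hello world"))                      # typed as str
    print(list(run("hello world", stream=True)))   # typed as an iterator of chunks
```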
@@ -2867,13 +3140,16 @@ class Agent:
|
|
|
2867
3140
|
run_id: The run id to continue. Alternative to passing run_response.
|
|
2868
3141
|
updated_tools: The updated tools to use for the run. Required to be used with `run_id`.
|
|
2869
3142
|
stream: Whether to stream the response.
|
|
2870
|
-
|
|
3143
|
+
stream_events: Whether to stream all events.
|
|
2871
3144
|
user_id: The user id to continue the run for.
|
|
2872
3145
|
session_id: The session id to continue the run for.
|
|
2873
3146
|
retries: The number of retries to continue the run for.
|
|
2874
3147
|
knowledge_filters: The knowledge filters to use for the run.
|
|
2875
3148
|
dependencies: The dependencies to use for continuing the run.
|
|
3149
|
+
metadata: The metadata to use for continuing the run.
|
|
2876
3150
|
debug_mode: Whether to enable debug mode.
|
|
3151
|
+
yield_run_response: Whether to yield the run response.
|
|
3152
|
+
(deprecated) stream_intermediate_steps: Whether to stream all steps.
|
|
2877
3153
|
"""
|
|
2878
3154
|
if run_response is None and run_id is None:
|
|
2879
3155
|
raise ValueError("Either run_response or run_id must be provided.")
|
|
@@ -2881,10 +3157,9 @@ class Agent:
|
|
|
2881
3157
|
if run_response is None and (run_id is not None and (session_id is None and self.session_id is None)):
|
|
2882
3158
|
raise ValueError("Session ID is required to continue a run from a run_id.")
|
|
2883
3159
|
|
|
2884
|
-
session_id, user_id
|
|
2885
|
-
run_id=run_id, # type: ignore
|
|
3160
|
+
session_id, user_id = self._initialize_session(
|
|
2886
3161
|
session_id=session_id,
|
|
2887
|
-
user_id=user_id,
|
|
3162
|
+
user_id=user_id,
|
|
2888
3163
|
)
|
|
2889
3164
|
|
|
2890
3165
|
# Initialize the Agent
|
|
@@ -2899,17 +3174,22 @@ class Agent:
|
|
|
2899
3174
|
if stream is None:
|
|
2900
3175
|
stream = False if self.stream is None else self.stream
|
|
2901
3176
|
|
|
2902
|
-
|
|
2903
|
-
|
|
2904
|
-
|
|
2905
|
-
|
|
3177
|
+
# Considering both stream_events and stream_intermediate_steps (deprecated)
|
|
3178
|
+
stream_events = stream_events or stream_intermediate_steps
|
|
3179
|
+
|
|
3180
|
+
# Can't stream events if streaming is disabled
|
|
3181
|
+
if stream is False:
|
|
3182
|
+
stream_events = False
|
|
3183
|
+
|
|
3184
|
+
if stream_events is None:
|
|
3185
|
+
stream_events = False if self.stream_events is None else self.stream_events
|
|
2906
3186
|
|
|
2907
3187
|
# Can't have stream_intermediate_steps if stream is False
|
|
2908
3188
|
if stream is False:
|
|
2909
|
-
|
|
3189
|
+
stream_events = False
|
|
2910
3190
|
|
|
2911
3191
|
self.stream = self.stream or stream
|
|
2912
|
-
self.
|
|
3192
|
+
self.stream_events = self.stream_events or stream_events
|
|
2913
3193
|
|
|
2914
3194
|
# Get knowledge filters
|
|
2915
3195
|
effective_filters = knowledge_filters
|
|
@@ -2936,14 +3216,13 @@ class Agent:
|
|
|
2936
3216
|
run_response=run_response,
|
|
2937
3217
|
updated_tools=updated_tools,
|
|
2938
3218
|
knowledge_filters=effective_filters,
|
|
2939
|
-
session_state=session_state,
|
|
2940
3219
|
run_id=run_id,
|
|
2941
3220
|
user_id=user_id,
|
|
2942
3221
|
session_id=session_id,
|
|
2943
3222
|
response_format=response_format,
|
|
2944
3223
|
dependencies=run_dependencies,
|
|
3224
|
+
stream_events=stream_events,
|
|
2945
3225
|
metadata=metadata,
|
|
2946
|
-
stream_intermediate_steps=stream_intermediate_steps,
|
|
2947
3226
|
yield_run_response=yield_run_response,
|
|
2948
3227
|
debug_mode=debug_mode,
|
|
2949
3228
|
**kwargs,
|
|
@@ -2954,7 +3233,6 @@ class Agent:
|
|
|
2954
3233
|
run_response=run_response,
|
|
2955
3234
|
updated_tools=updated_tools,
|
|
2956
3235
|
knowledge_filters=effective_filters,
|
|
2957
|
-
session_state=session_state,
|
|
2958
3236
|
run_id=run_id,
|
|
2959
3237
|
user_id=user_id,
|
|
2960
3238
|
response_format=response_format,
|
|
@@ -3006,7 +3284,6 @@ class Agent:
|
|
|
3006
3284
|
run_response: Optional[RunOutput] = None,
|
|
3007
3285
|
updated_tools: Optional[List[ToolExecution]] = None,
|
|
3008
3286
|
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
3009
|
-
session_state: Optional[Dict[str, Any]] = None,
|
|
3010
3287
|
run_id: Optional[str] = None,
|
|
3011
3288
|
user_id: Optional[str] = None,
|
|
3012
3289
|
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
|
|
@@ -3027,18 +3304,16 @@ class Agent:
|
|
|
3027
3304
|
7. Handle the updated tools
|
|
3028
3305
|
8. Get model response
|
|
3029
3306
|
9. Update the RunOutput with the model response
|
|
3030
|
-
10.
|
|
3031
|
-
11.
|
|
3032
|
-
12.
|
|
3033
|
-
13.
|
|
3307
|
+
10. Convert response to structured format
|
|
3308
|
+
11. Store media if enabled
|
|
3309
|
+
12. Execute post-hooks
|
|
3310
|
+
13. Create session summary
|
|
3311
|
+
14. Cleanup and store (scrub, stop timer, save to file, add to session, calculate metrics, save session)
|
|
3034
3312
|
"""
|
|
3035
3313
|
log_debug(f"Agent Run Continue: {run_response.run_id if run_response else run_id}", center=True) # type: ignore
|
|
3036
3314
|
|
|
3037
3315
|
# 1. Read existing session from db
|
|
3038
|
-
|
|
3039
|
-
agent_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)
|
|
3040
|
-
else:
|
|
3041
|
-
agent_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
|
|
3316
|
+
agent_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)
|
|
3042
3317
|
|
|
3043
3318
|
# 2. Resolve dependencies
|
|
3044
3319
|
if dependencies is not None:
|
|
@@ -3046,6 +3321,11 @@ class Agent:
|
|
|
3046
3321
|
|
|
3047
3322
|
# 3. Update metadata and session state
|
|
3048
3323
|
self._update_metadata(session=agent_session)
|
|
3324
|
+
# Initialize session state
|
|
3325
|
+
session_state = self._initialize_session_state(
|
|
3326
|
+
session_state={}, user_id=user_id, session_id=session_id, run_id=run_id
|
|
3327
|
+
)
|
|
3328
|
+
# Update session state from DB
|
|
3049
3329
|
if session_state is not None:
|
|
3050
3330
|
session_state = self._load_session_state(session=agent_session, session_state=session_state)
|
|
3051
3331
|
|
|
@@ -3120,30 +3400,24 @@ class Agent:
|
|
|
3120
3400
|
run_messages=run_messages,
|
|
3121
3401
|
)
|
|
3122
3402
|
|
|
3123
|
-
if self.store_media:
|
|
3124
|
-
self._store_media(run_response, model_response)
|
|
3125
|
-
else:
|
|
3126
|
-
self._scrub_media_from_run_output(run_response)
|
|
3127
|
-
|
|
3128
3403
|
# Break out of the run function if a tool call is paused
|
|
3129
3404
|
if any(tool_call.is_paused for tool_call in run_response.tools or []):
|
|
3130
|
-
return self.
|
|
3131
|
-
run_response=run_response,
|
|
3132
|
-
run_messages=run_messages,
|
|
3133
|
-
session=agent_session,
|
|
3134
|
-
user_id=user_id,
|
|
3405
|
+
return await self._ahandle_agent_run_paused(
|
|
3406
|
+
run_response=run_response, session=agent_session, user_id=user_id
|
|
3135
3407
|
)
|
|
3136
|
-
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3137
3408
|
|
|
3138
|
-
# 10.
|
|
3139
|
-
self._update_session_metrics(session=agent_session, run_response=run_response)
|
|
3140
|
-
|
|
3141
|
-
# Convert the response to the structured format if needed
|
|
3409
|
+
# 10. Convert the response to the structured format if needed
|
|
3142
3410
|
self._convert_response_to_structured_format(run_response)
|
|
3143
3411
|
|
|
3144
|
-
# 11.
|
|
3412
|
+
# 11. Store media if enabled
|
|
3413
|
+
if self.store_media:
|
|
3414
|
+
self._store_media(run_response, model_response)
|
|
3415
|
+
|
|
3416
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3417
|
+
|
|
3418
|
+
# 12. Execute post-hooks
|
|
3145
3419
|
if self.post_hooks is not None:
|
|
3146
|
-
|
|
3420
|
+
async for _ in self._aexecute_post_hooks(
|
|
3147
3421
|
hooks=self.post_hooks, # type: ignore
|
|
3148
3422
|
run_output=run_response,
|
|
3149
3423
|
session=agent_session,
|
|
@@ -3153,37 +3427,27 @@ class Agent:
|
|
|
3153
3427
|
dependencies=dependencies,
|
|
3154
3428
|
metadata=metadata,
|
|
3155
3429
|
**kwargs,
|
|
3156
|
-
)
|
|
3157
|
-
|
|
3158
|
-
run_response.status = RunStatus.completed
|
|
3430
|
+
):
|
|
3431
|
+
pass
|
|
3159
3432
|
|
|
3160
|
-
|
|
3161
|
-
|
|
3433
|
+
# Check for cancellation
|
|
3434
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3162
3435
|
|
|
3163
|
-
#
|
|
3164
|
-
|
|
3165
|
-
|
|
3166
|
-
|
|
3167
|
-
session=agent_session,
|
|
3168
|
-
user_id=user_id,
|
|
3169
|
-
):
|
|
3170
|
-
pass
|
|
3436
|
+
# 13. Create session summary
|
|
3437
|
+
if self.session_summary_manager is not None:
|
|
3438
|
+
# Upsert the RunOutput to Agent Session before creating the session summary
|
|
3439
|
+
agent_session.upsert_run(run=run_response)
|
|
3171
3440
|
|
|
3172
|
-
|
|
3173
|
-
|
|
3174
|
-
|
|
3175
|
-
|
|
3176
|
-
session_id=agent_session.session_id,
|
|
3177
|
-
user_id=user_id,
|
|
3178
|
-
)
|
|
3441
|
+
try:
|
|
3442
|
+
await self.session_summary_manager.acreate_session_summary(session=agent_session)
|
|
3443
|
+
except Exception as e:
|
|
3444
|
+
log_warning(f"Error in session summary creation: {str(e)}")
|
|
3179
3445
|
|
|
3180
|
-
|
|
3446
|
+
# Set the run status to completed
|
|
3447
|
+
run_response.status = RunStatus.completed
|
|
3181
3448
|
|
|
3182
|
-
#
|
|
3183
|
-
|
|
3184
|
-
await self.asave_session(session=agent_session)
|
|
3185
|
-
else:
|
|
3186
|
-
self.save_session(session=agent_session)
|
|
3449
|
+
# 14. Cleanup and store the run response and session
|
|
3450
|
+
await self._acleanup_and_store(run_response=run_response, session=agent_session, user_id=user_id)
|
|
3187
3451
|
|
|
3188
3452
|
# Log Agent Telemetry
|
|
3189
3453
|
await self._alog_agent_telemetry(session_id=agent_session.session_id, run_id=run_response.run_id)
|
|
@@ -3198,12 +3462,8 @@ class Agent:
|
|
|
3198
3462
|
run_response.content = str(e)
|
|
3199
3463
|
run_response.status = RunStatus.cancelled
|
|
3200
3464
|
|
|
3201
|
-
#
|
|
3202
|
-
|
|
3203
|
-
if self._has_async_db():
|
|
3204
|
-
await self.asave_session(session=agent_session)
|
|
3205
|
-
else:
|
|
3206
|
-
self.save_session(session=agent_session)
|
|
3465
|
+
# Cleanup and store the run response and session
|
|
3466
|
+
await self._acleanup_and_store(run_response=run_response, session=agent_session, user_id=user_id)
|
|
3207
3467
|
|
|
3208
3468
|
return run_response
|
|
3209
3469
|
finally:
|
|
@@ -3216,11 +3476,10 @@ class Agent:
|
|
|
3216
3476
|
run_response: Optional[RunOutput] = None,
|
|
3217
3477
|
updated_tools: Optional[List[ToolExecution]] = None,
|
|
3218
3478
|
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
3219
|
-
session_state: Optional[Dict[str, Any]] = None,
|
|
3220
3479
|
run_id: Optional[str] = None,
|
|
3221
3480
|
user_id: Optional[str] = None,
|
|
3222
3481
|
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
|
|
3223
|
-
|
|
3482
|
+
stream_events: bool = False,
|
|
3224
3483
|
yield_run_response: Optional[bool] = None,
|
|
3225
3484
|
dependencies: Optional[Dict[str, Any]] = None,
|
|
3226
3485
|
metadata: Optional[Dict[str, Any]] = None,
|
|
@@ -3238,12 +3497,9 @@ class Agent:
|
|
|
3238
3497
|
6. Prepare run messages
|
|
3239
3498
|
7. Handle the updated tools
|
|
3240
3499
|
8. Process model response
|
|
3241
|
-
9.
|
|
3242
|
-
10.
|
|
3243
|
-
11.
|
|
3244
|
-
12. Create the run completed event
|
|
3245
|
-
13. Add the RunOutput to Agent Session
|
|
3246
|
-
14. Save session to storage
|
|
3500
|
+
9. Create session summary
|
|
3501
|
+
10. Execute post-hooks
|
|
3502
|
+
11. Cleanup and store the run response and session
|
|
3247
3503
|
"""
|
|
3248
3504
|
log_debug(f"Agent Run Continue: {run_response.run_id if run_response else run_id}", center=True) # type: ignore
|
|
3249
3505
|
|
|
@@ -3256,6 +3512,11 @@ class Agent:
|
|
|
3256
3512
|
|
|
3257
3513
|
# 3. Update session state and metadata
|
|
3258
3514
|
self._update_metadata(session=agent_session)
|
|
3515
|
+
# Initialize session state
|
|
3516
|
+
session_state = self._initialize_session_state(
|
|
3517
|
+
session_state={}, user_id=user_id, session_id=session_id, run_id=run_id
|
|
3518
|
+
)
|
|
3519
|
+
# Update session state from DB
|
|
3259
3520
|
if session_state is not None:
|
|
3260
3521
|
session_state = self._load_session_state(session=agent_session, session_state=session_state)
|
|
3261
3522
|
|
|
@@ -3303,8 +3564,13 @@ class Agent:
|
|
|
3303
3564
|
|
|
3304
3565
|
try:
|
|
3305
3566
|
# Start the Run by yielding a RunContinued event
|
|
3306
|
-
if
|
|
3307
|
-
yield
|
|
3567
|
+
if stream_events:
|
|
3568
|
+
yield handle_event( # type: ignore
|
|
3569
|
+
create_run_continued_event(run_response),
|
|
3570
|
+
run_response,
|
|
3571
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3572
|
+
store_events=self.store_events,
|
|
3573
|
+
)
|
|
3308
3574
|
|
|
3309
3575
|
# 7. Handle the updated tools
|
|
3310
3576
|
async for event in self._ahandle_tool_call_updates_stream(
|
|
@@ -3320,7 +3586,7 @@ class Agent:
|
|
|
3320
3586
|
run_response=run_response,
|
|
3321
3587
|
run_messages=run_messages,
|
|
3322
3588
|
response_format=response_format,
|
|
3323
|
-
|
|
3589
|
+
stream_events=stream_events,
|
|
3324
3590
|
):
|
|
3325
3591
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3326
3592
|
yield event
|
|
@@ -3335,11 +3601,11 @@ class Agent:
|
|
|
3335
3601
|
run_response=run_response,
|
|
3336
3602
|
run_messages=run_messages,
|
|
3337
3603
|
response_format=response_format,
|
|
3338
|
-
|
|
3604
|
+
stream_events=stream_events,
|
|
3339
3605
|
):
|
|
3340
3606
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3341
3607
|
if isinstance(event, RunContentEvent):
|
|
3342
|
-
if
|
|
3608
|
+
if stream_events:
|
|
3343
3609
|
yield IntermediateRunContentEvent(
|
|
3344
3610
|
content=event.content,
|
|
3345
3611
|
content_type=event.content_type,
|
|
@@ -3352,7 +3618,7 @@ class Agent:
|
|
|
3352
3618
|
session=agent_session,
|
|
3353
3619
|
run_response=run_response,
|
|
3354
3620
|
run_messages=run_messages,
|
|
3355
|
-
|
|
3621
|
+
stream_events=stream_events,
|
|
3356
3622
|
):
|
|
3357
3623
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3358
3624
|
yield event
|
|
@@ -3360,71 +3626,88 @@ class Agent:
|
|
|
3360
3626
|
# Check for cancellation after model processing
|
|
3361
3627
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3362
3628
|
|
|
3629
|
+
# Parse response with parser model if provided
|
|
3630
|
+
async for event in self._aparse_response_with_parser_model_stream(
|
|
3631
|
+
session=agent_session, run_response=run_response, stream_events=stream_events
|
|
3632
|
+
):
|
|
3633
|
+
yield event
|
|
3634
|
+
|
|
3635
|
+
# Yield RunContentCompletedEvent
|
|
3636
|
+
if stream_events:
|
|
3637
|
+
yield handle_event( # type: ignore
|
|
3638
|
+
create_run_content_completed_event(from_run_response=run_response),
|
|
3639
|
+
run_response,
|
|
3640
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3641
|
+
store_events=self.store_events,
|
|
3642
|
+
)
|
|
3643
|
+
|
|
3363
3644
|
# Break out of the run function if a tool call is paused
|
|
3364
3645
|
if any(tool_call.is_paused for tool_call in run_response.tools or []):
|
|
3365
|
-
for item in self.
|
|
3366
|
-
run_response=run_response,
|
|
3367
|
-
run_messages=run_messages,
|
|
3368
|
-
session=agent_session,
|
|
3369
|
-
user_id=user_id,
|
|
3646
|
+
async for item in self._ahandle_agent_run_paused_stream(
|
|
3647
|
+
run_response=run_response, session=agent_session, user_id=user_id
|
|
3370
3648
|
):
|
|
3371
3649
|
yield item
|
|
3372
3650
|
return
|
|
3373
3651
|
|
|
3374
|
-
#
|
|
3375
|
-
completed_event = self._handle_event(create_run_completed_event(run_response), run_response)
|
|
3376
|
-
|
|
3377
|
-
# 10. Execute post-hooks
|
|
3652
|
+
# 8. Execute post-hooks
|
|
3378
3653
|
if self.post_hooks is not None:
|
|
3379
|
-
|
|
3654
|
+
async for event in self._aexecute_post_hooks(
|
|
3380
3655
|
hooks=self.post_hooks, # type: ignore
|
|
3381
3656
|
run_output=run_response,
|
|
3382
3657
|
session=agent_session,
|
|
3383
3658
|
user_id=user_id,
|
|
3384
|
-
debug_mode=debug_mode,
|
|
3385
3659
|
session_state=session_state,
|
|
3386
3660
|
dependencies=dependencies,
|
|
3387
3661
|
metadata=metadata,
|
|
3662
|
+
debug_mode=debug_mode,
|
|
3388
3663
|
**kwargs,
|
|
3389
|
-
)
|
|
3390
|
-
|
|
3391
|
-
|
|
3664
|
+
):
|
|
3665
|
+
yield event
|
|
3666
|
+
# Check for cancellation before model call
|
|
3667
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3392
3668
|
|
|
3393
|
-
#
|
|
3394
|
-
if
|
|
3395
|
-
|
|
3669
|
+
# 9. Create session summary
|
|
3670
|
+
if self.session_summary_manager is not None:
|
|
3671
|
+
# Upsert the RunOutput to Agent Session before creating the session summary
|
|
3672
|
+
agent_session.upsert_run(run=run_response)
|
|
3396
3673
|
|
|
3397
|
-
|
|
3398
|
-
|
|
3674
|
+
if stream_events:
|
|
3675
|
+
yield handle_event( # type: ignore
|
|
3676
|
+
create_session_summary_started_event(from_run_response=run_response),
|
|
3677
|
+
run_response,
|
|
3678
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3679
|
+
store_events=self.store_events,
|
|
3680
|
+
)
|
|
3681
|
+
try:
|
|
3682
|
+
await self.session_summary_manager.acreate_session_summary(session=agent_session)
|
|
3683
|
+
except Exception as e:
|
|
3684
|
+
log_warning(f"Error in session summary creation: {str(e)}")
|
|
3685
|
+
if stream_events:
|
|
3686
|
+
yield handle_event( # type: ignore
|
|
3687
|
+
create_session_summary_completed_event(
|
|
3688
|
+
from_run_response=run_response, session_summary=agent_session.summary
|
|
3689
|
+
),
|
|
3690
|
+
run_response,
|
|
3691
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3692
|
+
store_events=self.store_events,
|
|
3693
|
+
)
|
|
3399
3694
|
|
|
3400
|
-
#
|
|
3401
|
-
|
|
3402
|
-
run_response
|
|
3403
|
-
|
|
3404
|
-
|
|
3405
|
-
|
|
3695
|
+
# Create the run completed event
|
|
3696
|
+
completed_event = handle_event(
|
|
3697
|
+
create_run_completed_event(run_response),
|
|
3698
|
+
run_response,
|
|
3699
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3700
|
+
store_events=self.store_events,
|
|
3406
3701
|
)
|
|
3407
3702
|
|
|
3408
|
-
#
|
|
3409
|
-
|
|
3410
|
-
|
|
3411
|
-
# 12. Update Agent Memory
|
|
3412
|
-
async for event in self._amake_memories_cultural_knowledge_and_summaries(
|
|
3413
|
-
run_response=run_response,
|
|
3414
|
-
run_messages=run_messages,
|
|
3415
|
-
session=agent_session,
|
|
3416
|
-
user_id=user_id,
|
|
3417
|
-
):
|
|
3418
|
-
yield event
|
|
3703
|
+
# Set the run status to completed
|
|
3704
|
+
run_response.status = RunStatus.completed
|
|
3419
3705
|
|
|
3420
|
-
#
|
|
3421
|
-
|
|
3422
|
-
await self.asave_session(session=agent_session)
|
|
3423
|
-
else:
|
|
3424
|
-
self.save_session(session=agent_session)
|
|
3706
|
+
# 10. Cleanup and store the run response and session
|
|
3707
|
+
await self._acleanup_and_store(run_response=run_response, session=agent_session, user_id=user_id)
|
|
3425
3708
|
|
|
3426
|
-
if
|
|
3427
|
-
yield completed_event
|
|
3709
|
+
if stream_events:
|
|
3710
|
+
yield completed_event # type: ignore
|
|
3428
3711
|
|
|
3429
3712
|
if yield_run_response:
|
|
3430
3713
|
yield run_response
|
|
@@ -3440,17 +3723,15 @@ class Agent:
|
|
|
3440
3723
|
run_response.content = str(e)
|
|
3441
3724
|
|
|
3442
3725
|
# Yield the cancellation event
|
|
3443
|
-
yield
|
|
3726
|
+
yield handle_event( # type: ignore
|
|
3444
3727
|
create_run_cancelled_event(from_run_response=run_response, reason=str(e)),
|
|
3445
3728
|
run_response,
|
|
3729
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3730
|
+
store_events=self.store_events,
|
|
3446
3731
|
)
|
|
3447
3732
|
|
|
3448
|
-
#
|
|
3449
|
-
|
|
3450
|
-
if self._has_async_db():
|
|
3451
|
-
await self.asave_session(session=agent_session)
|
|
3452
|
-
else:
|
|
3453
|
-
self.save_session(session=agent_session)
|
|
3733
|
+
# Cleanup and store the run response and session
|
|
3734
|
+
await self._acleanup_and_store(run_response=run_response, session=agent_session, user_id=user_id)
|
|
3454
3735
|
finally:
|
|
3455
3736
|
# Always clean up the run tracking
|
|
3456
3737
|
cleanup_run(run_response.run_id) # type: ignore
|
|
@@ -3486,13 +3767,15 @@ class Agent:
|
|
|
3486
3767
|
all_args.update(kwargs)
|
|
3487
3768
|
|
|
3488
3769
|
for i, hook in enumerate(hooks):
|
|
3489
|
-
yield
|
|
3770
|
+
yield handle_event( # type: ignore
|
|
3490
3771
|
run_response=run_response,
|
|
3491
3772
|
event=create_pre_hook_started_event(
|
|
3492
3773
|
from_run_response=run_response,
|
|
3493
3774
|
run_input=run_input,
|
|
3494
3775
|
pre_hook_name=hook.__name__,
|
|
3495
3776
|
),
|
|
3777
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3778
|
+
store_events=self.store_events,
|
|
3496
3779
|
)
|
|
3497
3780
|
try:
|
|
3498
3781
|
# Filter arguments to only include those that the hook accepts
|
|
@@ -3500,13 +3783,15 @@ class Agent:
|
|
|
3500
3783
|
|
|
3501
3784
|
hook(**filtered_args)
|
|
3502
3785
|
|
|
3503
|
-
yield
|
|
3786
|
+
yield handle_event( # type: ignore
|
|
3504
3787
|
run_response=run_response,
|
|
3505
3788
|
event=create_pre_hook_completed_event(
|
|
3506
3789
|
from_run_response=run_response,
|
|
3507
3790
|
run_input=run_input,
|
|
3508
3791
|
pre_hook_name=hook.__name__,
|
|
3509
3792
|
),
|
|
3793
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3794
|
+
store_events=self.store_events,
|
|
3510
3795
|
)
|
|
3511
3796
|
|
|
3512
3797
|
except (InputCheckError, OutputCheckError) as e:
|
|
@@ -3552,13 +3837,15 @@ class Agent:
|
|
|
3552
3837
|
all_args.update(kwargs)
|
|
3553
3838
|
|
|
3554
3839
|
for i, hook in enumerate(hooks):
|
|
3555
|
-
yield
|
|
3840
|
+
yield handle_event( # type: ignore
|
|
3556
3841
|
run_response=run_response,
|
|
3557
3842
|
event=create_pre_hook_started_event(
|
|
3558
3843
|
from_run_response=run_response,
|
|
3559
3844
|
run_input=run_input,
|
|
3560
3845
|
pre_hook_name=hook.__name__,
|
|
3561
3846
|
),
|
|
3847
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3848
|
+
store_events=self.store_events,
|
|
3562
3849
|
)
|
|
3563
3850
|
try:
|
|
3564
3851
|
# Filter arguments to only include those that the hook accepts
|
|
@@ -3570,13 +3857,15 @@ class Agent:
|
|
|
3570
3857
|
# Synchronous function
|
|
3571
3858
|
hook(**filtered_args)
|
|
3572
3859
|
|
|
3573
|
-
yield
|
|
3860
|
+
yield handle_event( # type: ignore
|
|
3574
3861
|
run_response=run_response,
|
|
3575
3862
|
event=create_pre_hook_completed_event(
|
|
3576
3863
|
from_run_response=run_response,
|
|
3577
3864
|
run_input=run_input,
|
|
3578
3865
|
pre_hook_name=hook.__name__,
|
|
3579
3866
|
),
|
|
3867
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3868
|
+
store_events=self.store_events,
|
|
3580
3869
|
)
|
|
3581
3870
|
|
|
3582
3871
|
except (InputCheckError, OutputCheckError) as e:
|
|
@@ -3602,7 +3891,7 @@ class Agent:
|
|
|
3602
3891
|
user_id: Optional[str] = None,
|
|
3603
3892
|
debug_mode: Optional[bool] = None,
|
|
3604
3893
|
**kwargs: Any,
|
|
3605
|
-
) ->
|
|
3894
|
+
) -> Iterator[RunOutputEvent]:
|
|
3606
3895
|
"""Execute multiple post-hook functions in succession."""
|
|
3607
3896
|
if hooks is None:
|
|
3608
3897
|
return
|
|
@@ -3621,11 +3910,30 @@ class Agent:
|
|
|
3621
3910
|
all_args.update(kwargs)
|
|
3622
3911
|
|
|
3623
3912
|
for i, hook in enumerate(hooks):
|
|
3913
|
+
yield handle_event( # type: ignore
|
|
3914
|
+
run_response=run_output,
|
|
3915
|
+
event=create_post_hook_started_event(
|
|
3916
|
+
from_run_response=run_output,
|
|
3917
|
+
post_hook_name=hook.__name__,
|
|
3918
|
+
),
|
|
3919
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3920
|
+
store_events=self.store_events,
|
|
3921
|
+
)
|
|
3624
3922
|
try:
|
|
3625
3923
|
# Filter arguments to only include those that the hook accepts
|
|
3626
3924
|
filtered_args = filter_hook_args(hook, all_args)
|
|
3627
3925
|
|
|
3628
3926
|
hook(**filtered_args)
|
|
3927
|
+
|
|
3928
|
+
yield handle_event( # type: ignore
|
|
3929
|
+
run_response=run_output,
|
|
3930
|
+
event=create_post_hook_completed_event(
|
|
3931
|
+
from_run_response=run_output,
|
|
3932
|
+
post_hook_name=hook.__name__,
|
|
3933
|
+
),
|
|
3934
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3935
|
+
store_events=self.store_events,
|
|
3936
|
+
)
|
|
3629
3937
|
except (InputCheckError, OutputCheckError) as e:
|
|
3630
3938
|
raise e
|
|
3631
3939
|
except Exception as e:
|
|
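The added lines above bracket each post-hook call with a "started" event before the hook runs and a "completed" event after it returns, both yielded from the same generator that previously only executed the hooks. A condensed sketch of that shape using plain dictionaries as stand-in events (the `run_hooks_with_events` helper and event names are illustrative; the real code also routes events through `handle_event` and keeps the exception handling shown in the hunk):

```python
from typing import Any, Callable, Dict, Iterator, List


def run_hooks_with_events(hooks: List[Callable[..., Any]], **kwargs: Any) -> Iterator[Dict[str, Any]]:
    """Yield a started event, run the hook, then yield a completed event."""
    for hook in hooks:
        yield {"event": "PostHookStarted", "hook": hook.__name__}
        hook(**kwargs)
        yield {"event": "PostHookCompleted", "hook": hook.__name__}


def log_output(run_output: str) -> None:
    # Example post-hook: just inspect the run output.
    print(f"post-hook saw: {run_output}")


if __name__ == "__main__":
    for event in run_hooks_with_events([log_output], run_output="hello"):
        print(event)
```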
@@ -3646,7 +3954,7 @@ class Agent:
|
|
|
3646
3954
|
user_id: Optional[str] = None,
|
|
3647
3955
|
debug_mode: Optional[bool] = None,
|
|
3648
3956
|
**kwargs: Any,
|
|
3649
|
-
) ->
|
|
3957
|
+
) -> AsyncIterator[RunOutputEvent]:
|
|
3650
3958
|
"""Execute multiple post-hook functions in succession (async version)."""
|
|
3651
3959
|
if hooks is None:
|
|
3652
3960
|
return
|
|
@@ -3665,6 +3973,15 @@ class Agent:
|
|
|
3665
3973
|
all_args.update(kwargs)
|
|
3666
3974
|
|
|
3667
3975
|
for i, hook in enumerate(hooks):
|
|
3976
|
+
yield handle_event( # type: ignore
|
|
3977
|
+
run_response=run_output,
|
|
3978
|
+
event=create_post_hook_started_event(
|
|
3979
|
+
from_run_response=run_output,
|
|
3980
|
+
post_hook_name=hook.__name__,
|
|
3981
|
+
),
|
|
3982
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3983
|
+
store_events=self.store_events,
|
|
3984
|
+
)
|
|
3668
3985
|
try:
|
|
3669
3986
|
# Filter arguments to only include those that the hook accepts
|
|
3670
3987
|
filtered_args = filter_hook_args(hook, all_args)
|
|
@@ -3674,6 +3991,16 @@ class Agent:
|
|
|
3674
3991
|
else:
|
|
3675
3992
|
hook(**filtered_args)
|
|
3676
3993
|
|
|
3994
|
+
yield handle_event( # type: ignore
|
|
3995
|
+
run_response=run_output,
|
|
3996
|
+
event=create_post_hook_completed_event(
|
|
3997
|
+
from_run_response=run_output,
|
|
3998
|
+
post_hook_name=hook.__name__,
|
|
3999
|
+
),
|
|
4000
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
4001
|
+
store_events=self.store_events,
|
|
4002
|
+
)
|
|
4003
|
+
|
|
3677
4004
|
except (InputCheckError, OutputCheckError) as e:
|
|
3678
4005
|
raise e
|
|
3679
4006
|
except Exception as e:
|
|
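The hunks above route pre- and post-hook notifications through handle_event with events_to_skip and store_events, and the post-hook runners now yield started/completed events for each hook. A rough sketch of watching for these events on a streaming run; the redact_output hook, the post_hooks parameter and the event class names are illustrative assumptions, not taken from this diff:

    from agno.agent import Agent

    def redact_output(run_output) -> None:
        # Hypothetical post-hook: scrub a marker from the final content
        if isinstance(run_output.content, str):
            run_output.content = run_output.content.replace("SECRET", "[redacted]")

    agent = Agent(post_hooks=[redact_output])  # model configuration omitted; parameter name assumed

    for event in agent.run("Summarize the meeting", stream=True, stream_intermediate_steps=True):
        # Post-hook lifecycle events should now show up in the event stream
        if type(event).__name__ in ("PostHookStartedEvent", "PostHookCompletedEvent"):
            print(type(event).__name__)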
@@ -3686,7 +4013,6 @@ class Agent:
 def _handle_agent_run_paused(
 self,
 run_response: RunOutput,
- run_messages: RunMessages,
 session: AgentSession,
 user_id: Optional[str] = None,
 ) -> RunOutput:
@@ -3696,18 +4022,7 @@ class Agent:
 if not run_response.content:
 run_response.content = get_paused_content(run_response)

-
- self.save_run_response_to_file(
- run_response=run_response,
- input=run_messages.user_message,
- session_id=session.session_id,
- user_id=user_id,
- )
-
- session.upsert_run(run=run_response)
-
- # Save session to storage
- self.save_session(session=session)
+ self._cleanup_and_store(run_response=run_response, session=session, user_id=user_id)

 log_debug(f"Agent Run Paused: {run_response.run_id}", center=True, symbol="*")

@@ -3717,7 +4032,6 @@ class Agent:
 def _handle_agent_run_paused_stream(
 self,
 run_response: RunOutput,
- run_messages: RunMessages,
 session: AgentSession,
 user_id: Optional[str] = None,
 ) -> Iterator[RunOutputEvent]:
@@ -3728,26 +4042,67 @@ class Agent:
 run_response.content = get_paused_content(run_response)

 # We return and await confirmation/completion for the tools that require it
- pause_event =
+ pause_event = handle_event(
 create_run_paused_event(
 from_run_response=run_response,
 tools=run_response.tools,
 ),
 run_response,
+ events_to_skip=self.events_to_skip,  # type: ignore
+ store_events=self.store_events,
 )

-
-
-
-
-
-
+ self._cleanup_and_store(run_response=run_response, session=session, user_id=user_id)
+
+ yield pause_event  # type: ignore
+
+ log_debug(f"Agent Run Paused: {run_response.run_id}", center=True, symbol="*")
+
+ async def _ahandle_agent_run_paused(
+ self,
+ run_response: RunOutput,
+ session: AgentSession,
+ user_id: Optional[str] = None,
+ ) -> RunOutput:
+ # Set the run response to paused
+
+ run_response.status = RunStatus.paused
+ if not run_response.content:
+ run_response.content = get_paused_content(run_response)
+
+ await self._acleanup_and_store(run_response=run_response, session=session, user_id=user_id)
+
+ log_debug(f"Agent Run Paused: {run_response.run_id}", center=True, symbol="*")
+
+ # We return and await confirmation/completion for the tools that require it
+ return run_response
+
+ async def _ahandle_agent_run_paused_stream(
+ self,
+ run_response: RunOutput,
+ session: AgentSession,
+ user_id: Optional[str] = None,
+ ) -> AsyncIterator[RunOutputEvent]:
+ # Set the run response to paused
+
+ run_response.status = RunStatus.paused
+ if not run_response.content:
+ run_response.content = get_paused_content(run_response)
+
+ # We return and await confirmation/completion for the tools that require it
+ pause_event = handle_event(
+ create_run_paused_event(
+ from_run_response=run_response,
+ tools=run_response.tools,
+ ),
+ run_response,
+ events_to_skip=self.events_to_skip,  # type: ignore
+ store_events=self.store_events,
 )
- session.upsert_run(run=run_response)
- # Save session to storage
- self.save_session(session=session)

-
+ await self._acleanup_and_store(run_response=run_response, session=session, user_id=user_id)
+
+ yield pause_event  # type: ignore

 log_debug(f"Agent Run Paused: {run_response.run_id}", center=True, symbol="*")

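These hunks collapse the inline persistence logic into _cleanup_and_store / _acleanup_and_store and add async counterparts for paused runs, with the paused event itself flowing through handle_event. A sketch of how a caller might react to a paused run; the is_paused flag, confirmation fields and continue_run are assumptions based on agno's human-in-the-loop flow rather than this hunk:

    run_output = agent.run("Delete the archived records")    # a tool may require confirmation
    if run_output.is_paused:                                  # status is RunStatus.paused under the hood
        for tool in run_output.tools or []:
            if tool.requires_confirmation:
                tool.confirmed = True                         # approve the pending tool call
        run_output = agent.continue_run(run_output)           # assumed resume API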
@@ -3824,7 +4179,11 @@ class Agent:
 )

 def _run_tool(
- self,
+ self,
+ run_response: RunOutput,
+ run_messages: RunMessages,
+ tool: ToolExecution,
+ stream_events: bool = False,
 ) -> Iterator[RunOutputEvent]:
 self.model = cast(Model, self.model)
 # Execute the tool
@@ -3837,23 +4196,27 @@ class Agent:
 ):
 if isinstance(call_result, ModelResponse):
 if call_result.event == ModelResponseEvent.tool_call_started.value:
-
-
-
-
+ if stream_events:
+ yield handle_event(  # type: ignore
+ create_tool_call_started_event(from_run_response=run_response, tool=tool),
+ run_response,
+ events_to_skip=self.events_to_skip,  # type: ignore
+ store_events=self.store_events,
+ )

 if call_result.event == ModelResponseEvent.tool_call_completed.value and call_result.tool_executions:
 tool_execution = call_result.tool_executions[0]
 tool.result = tool_execution.result
 tool.tool_call_error = tool_execution.tool_call_error
-
-
-
-
-
-
-
-
+ if stream_events:
+ yield handle_event(  # type: ignore
+ create_tool_call_completed_event(
+ from_run_response=run_response, tool=tool, content=call_result.content
+ ),
+ run_response,
+ events_to_skip=self.events_to_skip,  # type: ignore
+ store_events=self.store_events,
+ )

 if len(function_call_results) > 0:
 run_messages.messages.extend(function_call_results)
@@ -3873,6 +4236,7 @@ class Agent:
 run_response: RunOutput,
 run_messages: RunMessages,
 tool: ToolExecution,
+ stream_events: bool = False,
 ) -> AsyncIterator[RunOutputEvent]:
 self.model = cast(Model, self.model)

@@ -3887,22 +4251,26 @@ class Agent:
 ):
 if isinstance(call_result, ModelResponse):
 if call_result.event == ModelResponseEvent.tool_call_started.value:
-
-
-
-
+ if stream_events:
+ yield handle_event(  # type: ignore
+ create_tool_call_started_event(from_run_response=run_response, tool=tool),
+ run_response,
+ events_to_skip=self.events_to_skip,  # type: ignore
+ store_events=self.store_events,
+ )
 if call_result.event == ModelResponseEvent.tool_call_completed.value and call_result.tool_executions:
 tool_execution = call_result.tool_executions[0]
 tool.result = tool_execution.result
 tool.tool_call_error = tool_execution.tool_call_error
-
-
-
-
-
-
-
-
+ if stream_events:
+ yield handle_event(  # type: ignore
+ create_tool_call_completed_event(
+ from_run_response=run_response, tool=tool, content=call_result.content
+ ),
+ run_response,
+ events_to_skip=self.events_to_skip,  # type: ignore
+ store_events=self.store_events,
+ )
 if len(function_call_results) > 0:
 run_messages.messages.extend(function_call_results)

@@ -3944,7 +4312,7 @@ class Agent:
 deque(self._run_tool(run_response, run_messages, _t), maxlen=0)

 def _handle_tool_call_updates_stream(
- self, run_response: RunOutput, run_messages: RunMessages
+ self, run_response: RunOutput, run_messages: RunMessages, stream_events: bool = False
 ) -> Iterator[RunOutputEvent]:
 self.model = cast(Model, self.model)
 for _t in run_response.tools or []:
@@ -3952,7 +4320,7 @@ class Agent:
 if _t.requires_confirmation is not None and _t.requires_confirmation is True and self._functions_for_model:
 # Tool is confirmed and hasn't been run before
 if _t.confirmed is not None and _t.confirmed is True and _t.result is None:
- yield from self._run_tool(run_response, run_messages, _t)
+ yield from self._run_tool(run_response, run_messages, _t, stream_events=stream_events)
 else:
 self._reject_tool_call(run_messages, _t)
 _t.confirmed = False
@@ -3977,7 +4345,7 @@ class Agent:
 # Case 4: Handle user input required tools
 elif _t.requires_user_input is not None and _t.requires_user_input is True:
 self._handle_user_input_update(tool=_t)
- yield from self._run_tool(run_response, run_messages, _t)
+ yield from self._run_tool(run_response, run_messages, _t, stream_events=stream_events)
 _t.requires_user_input = False
 _t.answered = True

@@ -4018,7 +4386,7 @@ class Agent:
 _t.answered = True

 async def _ahandle_tool_call_updates_stream(
- self, run_response: RunOutput, run_messages: RunMessages
+ self, run_response: RunOutput, run_messages: RunMessages, stream_events: bool = False
 ) -> AsyncIterator[RunOutputEvent]:
 self.model = cast(Model, self.model)
 for _t in run_response.tools or []:
@@ -4026,7 +4394,7 @@ class Agent:
 if _t.requires_confirmation is not None and _t.requires_confirmation is True and self._functions_for_model:
 # Tool is confirmed and hasn't been run before
 if _t.confirmed is not None and _t.confirmed is True and _t.result is None:
- async for event in self._arun_tool(run_response, run_messages, _t):
+ async for event in self._arun_tool(run_response, run_messages, _t, stream_events=stream_events):
 yield event
 else:
 self._reject_tool_call(run_messages, _t)
@@ -4050,7 +4418,7 @@ class Agent:
 # # Case 4: Handle user input required tools
 elif _t.requires_user_input is not None and _t.requires_user_input is True:
 self._handle_user_input_update(tool=_t)
- async for event in self._arun_tool(run_response, run_messages, _t):
+ async for event in self._arun_tool(run_response, run_messages, _t, stream_events=stream_events):
 yield event
 _t.requires_user_input = False
 _t.answered = True
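The changes above thread an explicit stream_events flag through _run_tool, _arun_tool and the tool-call update handlers, so tool-call started/completed events are only emitted when intermediate events were requested. A sketch of requesting them from the public API; the stream_intermediate_steps parameter name and the event class names are assumptions carried over from earlier releases:

    for event in agent.run("What is the weather in Tokyo?", stream=True, stream_intermediate_steps=True):
        # Tool-call lifecycle events are only yielded when intermediate events are requested
        if type(event).__name__ in ("ToolCallStartedEvent", "ToolCallCompletedEvent"):
            print(type(event).__name__, getattr(event, "tool", None))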
@@ -4157,7 +4525,7 @@ class Agent:
 run_response: RunOutput,
 run_messages: RunMessages,
 response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
-
+ stream_events: bool = False,
 ) -> Iterator[RunOutputEvent]:
 self.model = cast(Model, self.model)

@@ -4190,11 +4558,11 @@ class Agent:
 model_response_event=model_response_event,
 reasoning_state=reasoning_state,
 parse_structured_output=self.should_parse_structured_output,
-
+ stream_events=stream_events,
 )

 # Determine reasoning completed
- if
+ if stream_events and reasoning_state["reasoning_started"]:
 all_reasoning_steps: List[ReasoningStep] = []
 if run_response and run_response.reasoning_steps:
 all_reasoning_steps = cast(List[ReasoningStep], run_response.reasoning_steps)
@@ -4204,13 +4572,15 @@ class Agent:
 run_response=run_response,
 reasoning_time_taken=reasoning_state["reasoning_time_taken"],
 )
- yield
+ yield handle_event(  # type: ignore
 create_reasoning_completed_event(
 from_run_response=run_response,
 content=ReasoningSteps(reasoning_steps=all_reasoning_steps),
 content_type=ReasoningSteps.__name__,
 ),
 run_response,
+ events_to_skip=self.events_to_skip,  # type: ignore
+ store_events=self.store_events,
 )

 # Update RunOutput
@@ -4233,7 +4603,7 @@ class Agent:
 run_response: RunOutput,
 run_messages: RunMessages,
 response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
-
+ stream_events: bool = False,
 ) -> AsyncIterator[RunOutputEvent]:
 self.model = cast(Model, self.model)

@@ -4268,11 +4638,11 @@ class Agent:
 model_response_event=model_response_event,
 reasoning_state=reasoning_state,
 parse_structured_output=self.should_parse_structured_output,
-
+ stream_events=stream_events,
 ):
 yield event

- if
+ if stream_events and reasoning_state["reasoning_started"]:
 all_reasoning_steps: List[ReasoningStep] = []
 if run_response and run_response.reasoning_steps:
 all_reasoning_steps = cast(List[ReasoningStep], run_response.reasoning_steps)
@@ -4282,13 +4652,15 @@ class Agent:
 run_response=run_response,
 reasoning_time_taken=reasoning_state["reasoning_time_taken"],
 )
- yield
+ yield handle_event(  # type: ignore
 create_reasoning_completed_event(
 from_run_response=run_response,
 content=ReasoningSteps(reasoning_steps=all_reasoning_steps),
 content_type=ReasoningSteps.__name__,
 ),
 run_response,
+ events_to_skip=self.events_to_skip,  # type: ignore
+ store_events=self.store_events,
 )

 # Update RunOutput
@@ -4313,7 +4685,7 @@ class Agent:
 model_response_event: Union[ModelResponse, RunOutputEvent, TeamRunOutputEvent],
 reasoning_state: Optional[Dict[str, Any]] = None,
 parse_structured_output: bool = False,
-
+ stream_events: bool = False,
 ) -> Iterator[RunOutputEvent]:
 if isinstance(model_response_event, tuple(get_args(RunOutputEvent))) or isinstance(
 model_response_event, tuple(get_args(TeamRunOutputEvent))
@@ -4325,7 +4697,12 @@ class Agent:
 model_response_event.run_id = run_response.run_id  # type: ignore

 # We just bubble the event up
- yield
+ yield handle_event(  # type: ignore
+ model_response_event,  # type: ignore
+ run_response,
+ events_to_skip=self.events_to_skip,  # type: ignore
+ store_events=self.store_events,
+ )
 else:
 model_response_event = cast(ModelResponse, model_response_event)
 # If the model response is an assistant_response, yield a RunOutput
@@ -4370,13 +4747,15 @@ class Agent:

 # Only yield if we have content to show
 if content_type != "str":
- yield
+ yield handle_event(  # type: ignore
 create_run_output_content_event(
 from_run_response=run_response,
 content=model_response.content,
 content_type=content_type,
 ),
 run_response,
+ events_to_skip=self.events_to_skip,  # type: ignore
+ store_events=self.store_events,
 )
 elif (
 model_response_event.content is not None
@@ -4385,7 +4764,7 @@ class Agent:
 or model_response_event.citations is not None
 or model_response_event.provider_data is not None
 ):
- yield
+ yield handle_event(  # type: ignore
 create_run_output_content_event(
 from_run_response=run_response,
 content=model_response_event.content,
@@ -4395,6 +4774,8 @@ class Agent:
 model_provider_data=model_response_event.provider_data,
 ),
 run_response,
+ events_to_skip=self.events_to_skip,  # type: ignore
+ store_events=self.store_events,
 )

 # Process audio
@@ -4449,21 +4830,25 @@ class Agent:
 )
 run_response.created_at = model_response_event.created_at

- yield
+ yield handle_event(  # type: ignore
 create_run_output_content_event(
 from_run_response=run_response,
 response_audio=run_response.response_audio,
 ),
 run_response,
+ events_to_skip=self.events_to_skip,  # type: ignore
+ store_events=self.store_events,
 )

 if model_response_event.images is not None:
- yield
+ yield handle_event(  # type: ignore
 create_run_output_content_event(
 from_run_response=run_response,
 image=model_response_event.images[-1],
 ),
 run_response,
+ events_to_skip=self.events_to_skip,  # type: ignore
+ store_events=self.store_events,
 )

 if model_response.images is None:
@@ -4498,11 +4883,14 @@ class Agent:
 run_response.tools.extend(tool_executions_list)

 # Yield each tool call started event
-
-
-
-
-
+ if stream_events:
+ for tool in tool_executions_list:
+ yield handle_event(  # type: ignore
+ create_tool_call_started_event(from_run_response=run_response, tool=tool),
+ run_response,
+ events_to_skip=self.events_to_skip,  # type: ignore
+ store_events=self.store_events,
+ )

 # If the model response is a tool_call_completed, update the existing tool call in the run_response
 elif model_response_event.event == ModelResponseEvent.tool_call_completed.value:
@@ -4566,159 +4954,81 @@ class Agent:
 "reasoning_time_taken"
 ] + float(tool_call_metrics.duration)

-
-
-
-
-
-
-
-
+ if stream_events:
+ yield handle_event(  # type: ignore
+ create_tool_call_completed_event(
+ from_run_response=run_response, tool=tool_call, content=model_response_event.content
+ ),
+ run_response,
+ events_to_skip=self.events_to_skip,  # type: ignore
+ store_events=self.store_events,
+ )

- if
+ if stream_events:
 if reasoning_step is not None:
 if reasoning_state and not reasoning_state["reasoning_started"]:
- yield
+ yield handle_event(  # type: ignore
 create_reasoning_started_event(from_run_response=run_response),
 run_response,
+ events_to_skip=self.events_to_skip,  # type: ignore
+ store_events=self.store_events,
 )
 reasoning_state["reasoning_started"] = True

- yield
+ yield handle_event(  # type: ignore
 create_reasoning_step_event(
 from_run_response=run_response,
 reasoning_step=reasoning_step,
 reasoning_content=run_response.reasoning_content or "",
 ),
 run_response,
+ events_to_skip=self.events_to_skip,  # type: ignore
+ store_events=self.store_events,
 )

- def
+ def _make_cultural_knowledge(
 self,
- run_response: RunOutput,
 run_messages: RunMessages,
-
-
-
-
-
-
-
-
- user_message_str = (
- run_messages.user_message.get_content_string() if run_messages.user_message is not None else None
- )
-
- # Create user memories
- if user_message_str is not None and self.memory_manager is not None and not self.enable_agentic_memory:
- log_debug("Creating user memories.")
- futures.append(
- executor.submit(
- self.memory_manager.create_user_memories,
- message=user_message_str,
- user_id=user_id,
- agent_id=self.id,
- )
- )
-
- # Parse messages if provided
- if (
- self.enable_user_memories
- and run_messages.extra_messages is not None
- and len(run_messages.extra_messages) > 0
- ):
- parsed_messages = []
- for _im in run_messages.extra_messages:
- if isinstance(_im, Message):
- parsed_messages.append(_im)
- elif isinstance(_im, dict):
- try:
- parsed_messages.append(Message(**_im))
- except Exception as e:
- log_warning(f"Failed to validate message during memory update: {e}")
- else:
- log_warning(f"Unsupported message type: {type(_im)}")
- continue
-
- if len(parsed_messages) > 0 and self.memory_manager is not None:
- futures.append(
- executor.submit(
- self.memory_manager.create_user_memories,
- messages=parsed_messages,
- user_id=user_id,
- agent_id=self.id,
- )
- )
- else:
- log_warning("Unable to add messages to memory")
-
- # Create cultural knowledge
- if user_message_str is not None and self.culture_manager is not None and self.update_cultural_knowledge:
- log_debug("Creating cultural knowledge.")
- futures.append(
- executor.submit(
- self.culture_manager.create_cultural_knowledge,
- message=user_message_str,
- )
- )
-
- # Create session summary
- if self.session_summary_manager is not None:
- log_debug("Creating session summary.")
- futures.append(
- executor.submit(
- self.session_summary_manager.create_session_summary,  # type: ignore
- session=session,
- )
- )
-
- if futures:
- if self.stream_intermediate_steps:
- yield self._handle_event(
- create_memory_update_started_event(from_run_response=run_response),
- run_response,
- )
-
- # Wait for all operations to complete and handle any errors
- for future in as_completed(futures):
- try:
- future.result()
- except Exception as e:
- log_warning(f"Error in memory/summary operation: {str(e)}")
+ ):
+ if (
+ run_messages.user_message is not None
+ and self.culture_manager is not None
+ and self.update_cultural_knowledge
+ ):
+ log_debug("Creating cultural knowledge.")
+ self.culture_manager.create_cultural_knowledge(message=run_messages.user_message.get_content_string())

-
-
-
-
-
+ async def _acreate_cultural_knowledge(
+ self,
+ run_messages: RunMessages,
+ ):
+ if (
+ run_messages.user_message is not None
+ and self.culture_manager is not None
+ and self.update_cultural_knowledge
+ ):
+ log_debug("Creating cultural knowledge.")
+ await self.culture_manager.acreate_cultural_knowledge(
+ message=run_messages.user_message.get_content_string()
+ )

-
+ def _make_memories(
 self,
- run_response: RunOutput,
 run_messages: RunMessages,
- session: AgentSession,
 user_id: Optional[str] = None,
- )
-
-
-
- if
+ ):
+ user_message_str = (
+ run_messages.user_message.get_content_string() if run_messages.user_message is not None else None
+ )
+ if user_message_str is not None and user_message_str.strip() != "" and self.memory_manager is not None:
 log_debug("Creating user memories.")
-
-
-
-
- user_id=user_id,
- agent_id=self.id,
- )
+ self.memory_manager.create_user_memories(  # type: ignore
+ message=user_message_str,
+ user_id=user_id,
+ agent_id=self.id,
 )

-
- if (
- self.memory_manager is not None
- and run_messages.extra_messages is not None
- and len(run_messages.extra_messages) > 0
- ):
+ if run_messages.extra_messages is not None and len(run_messages.extra_messages) > 0:
 parsed_messages = []
 for _im in run_messages.extra_messages:
 if isinstance(_im, Message):
@@ -4732,54 +5042,59 @@ class Agent:
 log_warning(f"Unsupported message type: {type(_im)}")
 continue

-
-
-
-
-
-
+ # Filter out messages with empty content before passing to memory manager
+ non_empty_messages = [
+ msg
+ for msg in parsed_messages
+ if msg.content and (not isinstance(msg.content, str) or msg.content.strip() != "")
+ ]
+ if len(non_empty_messages) > 0 and self.memory_manager is not None:
+ self.memory_manager.create_user_memories(messages=non_empty_messages, user_id=user_id, agent_id=self.id)  # type: ignore
 else:
 log_warning("Unable to add messages to memory")

-
-
-
-
-
-
-
-
-
-
-
-
-
-
- log_debug("Creating session summary.")
- tasks.append(
- self.session_summary_manager.acreate_session_summary(
- session=session,
- )
+ async def _amake_memories(
+ self,
+ run_messages: RunMessages,
+ user_id: Optional[str] = None,
+ ):
+ user_message_str = (
+ run_messages.user_message.get_content_string() if run_messages.user_message is not None else None
+ )
+ if user_message_str is not None and user_message_str.strip() != "" and self.memory_manager is not None:
+ log_debug("Creating user memories.")
+ await self.memory_manager.acreate_user_memories(  # type: ignore
+ message=user_message_str,
+ user_id=user_id,
+ agent_id=self.id,
 )

- if
-
-
-
-
- )
-
-
-
-
-
-
+ if run_messages.extra_messages is not None and len(run_messages.extra_messages) > 0:
+ parsed_messages = []
+ for _im in run_messages.extra_messages:
+ if isinstance(_im, Message):
+ parsed_messages.append(_im)
+ elif isinstance(_im, dict):
+ try:
+ parsed_messages.append(Message(**_im))
+ except Exception as e:
+ log_warning(f"Failed to validate message during memory update: {e}")
+ else:
+ log_warning(f"Unsupported message type: {type(_im)}")
+ continue

-
-
-
-
+ # Filter out messages with empty content before passing to memory manager
+ non_empty_messages = [
+ msg
+ for msg in parsed_messages
+ if msg.content and (not isinstance(msg.content, str) or msg.content.strip() != "")
+ ]
+ if len(non_empty_messages) > 0 and self.memory_manager is not None:
+ await self.memory_manager.acreate_user_memories(  # type: ignore
+ messages=non_empty_messages, user_id=user_id, agent_id=self.id
 )
+ else:
+ log_warning("Unable to add messages to memory")

 def _raise_if_async_tools(self) -> None:
 """Raise an exception if any tools contain async functions"""
@@ -4878,7 +5193,7 @@ class Agent:
 self._rebuild_tools = True

 if self.enable_agentic_state:
- agent_tools.append(self.
+ agent_tools.append(Function(name="update_session_state", entrypoint=self._update_session_state_tool))

 # Add tools for accessing knowledge
 if self.knowledge is not None or self.knowledge_retriever is not None:
@@ -4976,7 +5291,7 @@ class Agent:
 self._rebuild_tools = True

 if self.enable_agentic_state:
- agent_tools.append(self.
+ agent_tools.append(Function(name="update_session_state", entrypoint=self._update_session_state_tool))

 # Add tools for accessing knowledge
 if self.knowledge is not None or self.knowledge_retriever is not None:
@@ -5014,137 +5329,6 @@ class Agent:

 return agent_tools

- def _collect_joint_images(
- self,
- run_input: Optional[RunInput] = None,
- session: Optional[AgentSession] = None,
- ) -> Optional[Sequence[Image]]:
- """Collect images from input, session history, and current run response."""
- joint_images: List[Image] = []
-
- # 1. Add images from current input
- if run_input and run_input.images:
- joint_images.extend(run_input.images)
- log_debug(f"Added {len(run_input.images)} input images to joint list")
-
- # 2. Add images from session history (from both input and generated sources)
- try:
- if session and session.runs:
- for historical_run in session.runs:
- # Add generated images from previous runs
- if historical_run.images:
- joint_images.extend(historical_run.images)
- log_debug(
- f"Added {len(historical_run.images)} generated images from historical run {historical_run.run_id}"
- )
-
- # Add input images from previous runs
- if historical_run.input and historical_run.input.images:
- joint_images.extend(historical_run.input.images)
- log_debug(
- f"Added {len(historical_run.input.images)} input images from historical run {historical_run.run_id}"
- )
- except Exception as e:
- log_debug(f"Could not access session history for images: {e}")
-
- if joint_images:
- log_debug(f"Images Available to Model: {len(joint_images)} images")
- return joint_images if joint_images else None
-
- def _collect_joint_videos(
- self,
- run_input: Optional[RunInput] = None,
- session: Optional[AgentSession] = None,
- ) -> Optional[Sequence[Video]]:
- """Collect videos from input, session history, and current run response."""
- joint_videos: List[Video] = []
-
- # 1. Add videos from current input
- if run_input and run_input.videos:
- joint_videos.extend(run_input.videos)
- log_debug(f"Added {len(run_input.videos)} input videos to joint list")
-
- # 2. Add videos from session history (from both input and generated sources)
- try:
- if session and session.runs:
- for historical_run in session.runs:
- # Add generated videos from previous runs
- if historical_run.videos:
- joint_videos.extend(historical_run.videos)
- log_debug(
- f"Added {len(historical_run.videos)} generated videos from historical run {historical_run.run_id}"
- )
-
- # Add input videos from previous runs
- if historical_run.input and historical_run.input.videos:
- joint_videos.extend(historical_run.input.videos)
- log_debug(
- f"Added {len(historical_run.input.videos)} input videos from historical run {historical_run.run_id}"
- )
- except Exception as e:
- log_debug(f"Could not access session history for videos: {e}")
-
- if joint_videos:
- log_debug(f"Videos Available to Model: {len(joint_videos)} videos")
- return joint_videos if joint_videos else None
-
- def _collect_joint_audios(
- self,
- run_input: Optional[RunInput] = None,
- session: Optional[AgentSession] = None,
- ) -> Optional[Sequence[Audio]]:
- """Collect audios from input, session history, and current run response."""
- joint_audios: List[Audio] = []
-
- # 1. Add audios from current input
- if run_input and run_input.audios:
- joint_audios.extend(run_input.audios)
- log_debug(f"Added {len(run_input.audios)} input audios to joint list")
-
- # 2. Add audios from session history (from both input and generated sources)
- try:
- if session and session.runs:
- for historical_run in session.runs:
- # Add generated audios from previous runs
- if historical_run.audio:
- joint_audios.extend(historical_run.audio)
- log_debug(
- f"Added {len(historical_run.audio)} generated audios from historical run {historical_run.run_id}"
- )
-
- # Add input audios from previous runs
- if historical_run.input and historical_run.input.audios:
- joint_audios.extend(historical_run.input.audios)
- log_debug(
- f"Added {len(historical_run.input.audios)} input audios from historical run {historical_run.run_id}"
- )
- except Exception as e:
- log_debug(f"Could not access session history for audios: {e}")
-
- if joint_audios:
- log_debug(f"Audios Available to Model: {len(joint_audios)} audios")
- return joint_audios if joint_audios else None
-
- def _collect_joint_files(
- self,
- run_input: Optional[RunInput] = None,
- ) -> Optional[Sequence[File]]:
- """Collect files from input and session history."""
- from agno.utils.log import log_debug
-
- joint_files: List[File] = []
-
- # 1. Add files from current input
- if run_input and run_input.files:
- joint_files.extend(run_input.files)
-
- # TODO: Files aren't stored in session history yet and dont have a FileArtifact
-
- if joint_files:
- log_debug(f"Files Available to Model: {len(joint_files)} files")
-
- return joint_files if joint_files else None
-
 def _determine_tools_for_model(
 self,
 model: Model,
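The private _collect_joint_* methods are deleted here; per the new imports at the top of this diff, equivalent module-level helpers now live in agno.utils.agent and receive the run input and session explicitly. A minimal sketch mirroring the call sites added further down; run_output and session are placeholders:

    from agno.utils.agent import collect_joint_images

    # Gathers images from the current run input plus prior runs stored on the session
    joint_images = collect_joint_images(run_output.input, session)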
@@ -5254,10 +5438,10 @@ class Agent:
 )

 # Only collect media if functions actually need them
- joint_images =
- joint_files =
- joint_audios =
- joint_videos =
+ joint_images = collect_joint_images(run_response.input, session) if needs_media else None
+ joint_files = collect_joint_files(run_response.input) if needs_media else None
+ joint_audios = collect_joint_audios(run_response.input, session) if needs_media else None
+ joint_videos = collect_joint_videos(run_response.input, session) if needs_media else None

 for func in self._functions_for_model.values():
 func._session_state = session_state
@@ -5376,10 +5560,10 @@ class Agent:
 )

 # Only collect media if functions actually need them
- joint_images =
- joint_files =
- joint_audios =
- joint_videos =
+ joint_images = collect_joint_images(run_response.input, session) if needs_media else None
+ joint_files = collect_joint_files(run_response.input) if needs_media else None
+ joint_audios = collect_joint_audios(run_response.input, session) if needs_media else None
+ joint_videos = collect_joint_videos(run_response.input, session) if needs_media else None

 for func in self._functions_for_model.values():
 func._session_state = session_state
@@ -5488,17 +5672,21 @@ class Agent:
 return agent_data

 # -*- Session Database Functions
- def _read_session(
+ def _read_session(
+ self, session_id: str, session_type: SessionType = SessionType.AGENT
+ ) -> Optional[Union[AgentSession, TeamSession, WorkflowSession]]:
 """Get a Session from the database."""
 try:
 if not self.db:
 raise ValueError("Db not initialized")
- return self.db.get_session(session_id=session_id, session_type=
+ return self.db.get_session(session_id=session_id, session_type=session_type)  # type: ignore
 except Exception as e:
 log_warning(f"Error getting session from db: {e}")
 return None

- async def _aread_session(
+ async def _aread_session(
+ self, session_id: str, session_type: SessionType = SessionType.AGENT
+ ) -> Optional[Union[AgentSession, TeamSession, WorkflowSession]]:
 """Get a Session from the database."""
 try:
 if not self.db:
@@ -5599,12 +5787,17 @@ class Agent:
 if agent_session is None:
 # Creating new session if none found
 log_debug(f"Creating new AgentSession: {session_id}")
+ session_data = {}
+ if self.session_state is not None:
+ from copy import deepcopy
+
+ session_data["session_state"] = deepcopy(self.session_state)
 agent_session = AgentSession(
 session_id=session_id,
 agent_id=self.id,
 user_id=user_id,
 agent_data=self._get_agent_data(),
- session_data=
+ session_data=session_data,
 metadata=self.metadata,
 created_at=int(time()),
 )
@@ -5629,18 +5822,25 @@ class Agent:
 agent_session = None
 if self.db is not None and self.team_id is None and self.workflow_id is None:
 log_debug(f"Reading AgentSession: {session_id}")
-
-
+ if self._has_async_db():
+ agent_session = cast(AgentSession, await self._aread_session(session_id=session_id))
+ else:
+ agent_session = cast(AgentSession, self._read_session(session_id=session_id))

 if agent_session is None:
 # Creating new session if none found
 log_debug(f"Creating new AgentSession: {session_id}")
+ session_data = {}
+ if self.session_state is not None:
+ from copy import deepcopy
+
+ session_data["session_state"] = deepcopy(self.session_state)
 agent_session = AgentSession(
 session_id=session_id,
 agent_id=self.id,
 user_id=user_id,
 agent_data=self._get_agent_data(),
- session_data=
+ session_data=session_data,
 metadata=self.metadata,
 created_at=int(time()),
 )
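With these hunks a newly created session record is seeded from a deep copy of the agent's configured session_state instead of an empty session_data dict, and reads go through the async path when the database supports it. A sketch of the seeding behavior; db is a placeholder and get_session_state is assumed to be the existing accessor:

    agent = Agent(db=db, session_state={"cart": []})
    agent.run("Add apples to my cart", session_id="sess-1")
    # The fresh session starts from a copy of the configured state rather than {}
    print(agent.get_session_state(session_id="sess-1"))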
@@ -5666,13 +5866,13 @@ class Agent:
 log_warning(f"RunOutput {run_id} not found in AgentSession {self._agent_session.session_id}")
 return None
 else:
-
- if
- run_response =
+ session = self.get_session(session_id=session_id)
+ if session is not None:
+ run_response = session.get_run(run_id=run_id)
 if run_response is not None:
 return run_response
 else:
- log_warning(f"RunOutput {run_id} not found in
+ log_warning(f"RunOutput {run_id} not found in Session {session_id}")
 return None

 def get_last_run_output(self, session_id: Optional[str] = None) -> Optional[RunOutput]:
@@ -5690,17 +5890,17 @@ class Agent:
 and self._agent_session.runs is not None
 and len(self._agent_session.runs) > 0
 ):
-
-
-
+ for run_output in reversed(self._agent_session.runs):
+ if hasattr(run_output, "agent_id") and run_output.agent_id == self.id:
+ return run_output
 else:
-
- if
-
-
-
+ session = self.get_session(session_id=session_id)
+ if session is not None and session.runs is not None and len(session.runs) > 0:
+ for run_output in reversed(session.runs):
+ if hasattr(run_output, "agent_id") and run_output.agent_id == self.id:
+ return run_output
 else:
- log_warning(f"No run responses found in
+ log_warning(f"No run responses found in Session {session_id}")
 return None

 def cancel_run(self, run_id: str) -> bool:
@@ -5738,7 +5938,65 @@ class Agent:

 # Load and return the session from the database
 if self.db is not None:
-
+ loaded_session = None
+
+ # We have a standalone agent, so we are loading an AgentSession
+ if self.team_id is None and self.workflow_id is None:
+ loaded_session = cast(
+ AgentSession,
+ self._read_session(session_id=session_id_to_load, session_type=SessionType.AGENT),  # type: ignore
+ )
+
+ # We have a team member agent, so we are loading a TeamSession
+ if loaded_session is None and self.team_id is not None:
+ # Load session for team member agents
+ loaded_session = cast(
+ TeamSession,
+ self._read_session(session_id=session_id_to_load, session_type=SessionType.TEAM),  # type: ignore
+ )
+
+ # We have a workflow member agent, so we are loading a WorkflowSession
+ if loaded_session is None and self.workflow_id is not None:
+ # Load session for workflow memberagents
+ loaded_session = cast(
+ WorkflowSession,
+ self._read_session(session_id=session_id_to_load, session_type=SessionType.WORKFLOW),  # type: ignore
+ )
+
+ # Cache the session if relevant
+ if loaded_session is not None and self.cache_session:
+ self._agent_session = loaded_session
+
+ return loaded_session
+
+ log_debug(f"Session {session_id_to_load} not found in db")
+ return None
+
+ async def aget_session(
+ self,
+ session_id: Optional[str] = None,
+ ) -> Optional[AgentSession]:
+ """Load an AgentSession from database or cache.
+
+ Args:
+ session_id: The session_id to load from storage.
+
+ Returns:
+ AgentSession: The AgentSession loaded from the database/cache or None if not found.
+ """
+ if not session_id and not self.session_id:
+ raise Exception("No session_id provided")
+
+ session_id_to_load = session_id or self.session_id
+
+ # If there is a cached session, return it
+ if self.cache_session and hasattr(self, "_agent_session") and self._agent_session is not None:
+ if self._agent_session.session_id == session_id_to_load:
+ return self._agent_session
+
+ # Load and return the session from the database
+ if self.db is not None:
+ agent_session = cast(AgentSession, await self._aread_session(session_id=session_id_to_load))  # type: ignore

 # Cache the session if relevant
 if agent_session is not None and self.cache_session:
@@ -5787,8 +6045,10 @@ class Agent:
 session.session_data["session_state"].pop("current_session_id", None)
 session.session_data["session_state"].pop("current_user_id", None)
 session.session_data["session_state"].pop("current_run_id", None)
-
-
+ if self._has_async_db():
+ await self._aupsert_session(session=session)
+ else:
+ self._upsert_session(session=session)
 log_debug(f"Created or updated AgentSession record: {session.session_id}")

 def get_chat_history(self, session_id: Optional[str] = None) -> List[Message]:
@@ -5918,6 +6178,61 @@ class Agent:
 raise Exception("Session not found")
 return session.session_data.get("session_state", {}) if session.session_data is not None else {}

+ def update_session_state(self, session_state_updates: Dict[str, Any], session_id: Optional[str] = None) -> str:
+ """
+ Update the session state for the given session ID and user ID.
+ Args:
+ session_state_updates: The updates to apply to the session state. Should be a dictionary of key-value pairs.
+ session_id: The session ID to update. If not provided, the current cached session ID is used.
+ Returns:
+ dict: The updated session state.
+ """
+ session_id = session_id or self.session_id
+ if session_id is None:
+ raise Exception("Session ID is not set")
+ session = self.get_session(session_id=session_id)  # type: ignore
+ if session is None:
+ raise Exception("Session not found")
+
+ if session.session_data is not None and "session_state" not in session.session_data:
+ session.session_data["session_state"] = {}
+
+ # Overwrite the loaded DB session state with the new session state
+ for key, value in session_state_updates.items():
+ session.session_data["session_state"][key] = value  # type: ignore
+
+ self.save_session(session=session)
+
+ return session.session_data["session_state"]  # type: ignore
+
+ async def aupdate_session_state(
+ self, session_state_updates: Dict[str, Any], session_id: Optional[str] = None
+ ) -> str:
+ """
+ Update the session state for the given session ID and user ID.
+ Args:
+ session_state_updates: The updates to apply to the session state. Should be a dictionary of key-value pairs.
+ session_id: The session ID to update. If not provided, the current cached session ID is used.
+ Returns:
+ dict: The updated session state.
+ """
+ session_id = session_id or self.session_id
+ if session_id is None:
+ raise Exception("Session ID is not set")
+ session = await self.aget_session(session_id=session_id)  # type: ignore
+ if session is None:
+ raise Exception("Session not found")
+
+ if session.session_data is not None and "session_state" not in session.session_data:
+ session.session_data["session_state"] = {}
+
+ for key, value in session_state_updates.items():
+ session.session_data["session_state"][key] = value  # type: ignore
+
+ await self.asave_session(session=session)
+
+ return session.session_data["session_state"]  # type: ignore
+
 def get_session_metrics(self, session_id: Optional[str] = None) -> Optional[Metrics]:
 """Get the session metrics for the given session ID and user ID."""
 session_id = session_id or self.session_id
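The new public update_session_state and aupdate_session_state methods merge a dict of updates into the stored session state and persist the session. A short usage sketch based on the signatures added above; the session is assumed to exist already:

    # Merge new keys into the stored state and save the session
    new_state = agent.update_session_state({"theme": "dark"}, session_id="sess-1")
    print(new_state)

    # Async variant
    # new_state = await agent.aupdate_session_state({"theme": "dark"}, session_id="sess-1")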
@@ -6377,7 +6692,7 @@ class Agent:
 system_message_content += f"{get_response_model_format_prompt(self.output_schema)}"

 # 3.3.15 Add the session state to the system message
- if
+ if add_session_state_to_context and session_state is not None:
 system_message_content += f"\n<session_state>\n{session_state}\n</session_state>\n\n"

 # Return the system message
@@ -7013,6 +7328,10 @@ class Agent:
 for _msg in history_copy:
 _msg.from_history = True

+ # Filter tool calls from history if limit is set (before adding to run_messages)
+ if self.max_tool_calls_from_history is not None:
+ filter_tool_calls(history_copy, self.max_tool_calls_from_history)
+
 log_debug(f"Adding {len(history_copy)} messages from history")

 run_messages.messages += history_copy
@@ -7206,6 +7525,10 @@ class Agent:
 for _msg in history_copy:
 _msg.from_history = True

+ # Filter tool calls from history if limit is set (before adding to run_messages)
+ if self.max_tool_calls_from_history is not None:
+ filter_tool_calls(history_copy, self.max_tool_calls_from_history)
+
 log_debug(f"Adding {len(history_copy)} messages from history")

 run_messages.messages += history_copy
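Both history-building paths now call filter_tool_calls when max_tool_calls_from_history is set, capping how many historical tool calls are replayed into the context. A sketch of enabling it, assuming the new attribute is exposed as a constructor parameter:

    agent = Agent(
        db=db,                            # placeholder database instance
        add_history_to_context=True,      # history flag name assumed from agno 2.x
        max_tool_calls_from_history=3,    # keep only the 3 most recent tool calls from history
    )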
@@ -7750,28 +8073,40 @@ class Agent:

 def _handle_reasoning(self, run_response: RunOutput, run_messages: RunMessages) -> None:
 if self.reasoning or self.reasoning_model is not None:
- reasoning_generator = self._reason(
+ reasoning_generator = self._reason(
+ run_response=run_response, run_messages=run_messages, stream_events=False
+ )

 # Consume the generator without yielding
 deque(reasoning_generator, maxlen=0)

- def _handle_reasoning_stream(
+ def _handle_reasoning_stream(
+ self, run_response: RunOutput, run_messages: RunMessages, stream_events: Optional[bool] = None
+ ) -> Iterator[RunOutputEvent]:
 if self.reasoning or self.reasoning_model is not None:
- reasoning_generator = self._reason(
+ reasoning_generator = self._reason(
+ run_response=run_response,
+ run_messages=run_messages,
+ stream_events=stream_events,
+ )
 yield from reasoning_generator

 async def _ahandle_reasoning(self, run_response: RunOutput, run_messages: RunMessages) -> None:
 if self.reasoning or self.reasoning_model is not None:
- reason_generator = self._areason(run_response=run_response, run_messages=run_messages)
+ reason_generator = self._areason(run_response=run_response, run_messages=run_messages, stream_events=False)
 # Consume the generator without yielding
 async for _ in reason_generator:
 pass

 async def _ahandle_reasoning_stream(
- self, run_response: RunOutput, run_messages: RunMessages
+ self, run_response: RunOutput, run_messages: RunMessages, stream_events: Optional[bool] = None
 ) -> AsyncIterator[RunOutputEvent]:
 if self.reasoning or self.reasoning_model is not None:
- reason_generator = self._areason(
+ reason_generator = self._areason(
+ run_response=run_response,
+ run_messages=run_messages,
+ stream_events=stream_events,
+ )
 async for item in reason_generator:
 yield item

@@ -7798,12 +8133,16 @@ class Agent:
|
|
|
7798
8133
|
|
|
7799
8134
|
return updated_reasoning_content
|
|
7800
8135
|
|
|
7801
|
-
def _reason(
|
|
8136
|
+
def _reason(
|
|
8137
|
+
self, run_response: RunOutput, run_messages: RunMessages, stream_events: Optional[bool] = None
|
|
8138
|
+
) -> Iterator[RunOutputEvent]:
|
|
7802
8139
|
# Yield a reasoning started event
|
|
7803
|
-
if
|
|
7804
|
-
yield
|
|
8140
|
+
if stream_events:
|
|
8141
|
+
yield handle_event( # type: ignore
|
|
7805
8142
|
create_reasoning_started_event(from_run_response=run_response),
|
|
7806
8143
|
run_response,
|
|
8144
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
8145
|
+
store_events=self.store_events,
|
|
7807
8146
|
)
|
|
7808
8147
|
|
|
7809
8148
|
use_default_reasoning = False
|
|
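_reason now receives an explicit stream_events flag and only emits its lifecycle events (started, per-step, completed) when that flag is true, routing each one through the shared handle_event helper along with the agent's events_to_skip and store_events settings. Stripped of the agno types, the control flow amounts to gating optional events behind the flag while the work itself always happens; a minimal sketch with placeholder event dicts:

from typing import Iterator

def reason(stream_events: bool) -> Iterator[dict]:
    """The reasoning work always runs; events are only yielded on request."""
    if stream_events:
        yield {"event": "ReasoningStarted"}
    steps = ["analyze", "plan", "answer"]   # placeholder for the real reasoning loop
    for step in steps:
        if stream_events:
            yield {"event": "ReasoningStep", "content": step}
    if stream_events:
        yield {"event": "ReasoningCompleted", "steps": steps}

print(list(reason(stream_events=False)))   # [] - nothing streamed
print(list(reason(stream_events=True)))    # started, three steps, completed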
@@ -7935,14 +8274,16 @@ class Agent:
|
|
|
7935
8274
|
reasoning_steps=[ReasoningStep(result=reasoning_message.content)],
|
|
7936
8275
|
reasoning_agent_messages=[reasoning_message],
|
|
7937
8276
|
)
|
|
7938
|
-
if
|
|
7939
|
-
yield
|
|
8277
|
+
if stream_events:
|
|
8278
|
+
yield handle_event( # type: ignore
|
|
7940
8279
|
create_reasoning_completed_event(
|
|
7941
8280
|
from_run_response=run_response,
|
|
7942
8281
|
content=ReasoningSteps(reasoning_steps=[ReasoningStep(result=reasoning_message.content)]),
|
|
7943
8282
|
content_type=ReasoningSteps.__name__,
|
|
7944
8283
|
),
|
|
7945
8284
|
run_response,
|
|
8285
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
8286
|
+
store_events=self.store_events,
|
|
7946
8287
|
)
|
|
7947
8288
|
else:
|
|
7948
8289
|
log_warning(
|
|
@@ -8011,7 +8352,7 @@ class Agent:
|
|
|
8011
8352
|
)
|
|
8012
8353
|
break
|
|
8013
8354
|
|
|
8014
|
-
if (
|
|
8355
|
+
if reasoning_agent_response.content is not None and (
|
|
8015
8356
|
reasoning_agent_response.content.reasoning_steps is None
|
|
8016
8357
|
or len(reasoning_agent_response.content.reasoning_steps) == 0
|
|
8017
8358
|
):
|
|
@@ -8021,20 +8362,22 @@ class Agent:
|
|
|
8021
8362
|
reasoning_steps: List[ReasoningStep] = reasoning_agent_response.content.reasoning_steps
|
|
8022
8363
|
all_reasoning_steps.extend(reasoning_steps)
|
|
8023
8364
|
# Yield reasoning steps
|
|
8024
|
-
if
|
|
8365
|
+
if stream_events:
|
|
8025
8366
|
for reasoning_step in reasoning_steps:
|
|
8026
8367
|
updated_reasoning_content = self._format_reasoning_step_content(
|
|
8027
8368
|
run_response=run_response,
|
|
8028
8369
|
reasoning_step=reasoning_step,
|
|
8029
8370
|
)
|
|
8030
8371
|
|
|
8031
|
-
yield
|
|
8372
|
+
yield handle_event( # type: ignore
|
|
8032
8373
|
create_reasoning_step_event(
|
|
8033
8374
|
from_run_response=run_response,
|
|
8034
8375
|
reasoning_step=reasoning_step,
|
|
8035
8376
|
reasoning_content=updated_reasoning_content,
|
|
8036
8377
|
),
|
|
8037
8378
|
run_response,
|
|
8379
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
8380
|
+
store_events=self.store_events,
|
|
8038
8381
|
)
|
|
8039
8382
|
|
|
8040
8383
|
# Find the index of the first assistant message
|
|
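When stream_events is set, every ReasoningStep returned by the reasoning agent is first folded into the run's accumulated reasoning content via _format_reasoning_step_content, and the step event carries that running text. The formatter's body is outside this hunk, so the sketch below only assumes the accumulation pattern (append each step, emit the full transcript so far); the fields on ReasoningStep other than result are illustrative.

from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class ReasoningStep:
    result: str                      # field name taken from the hunk above
    title: Optional[str] = None      # illustrative extra field

@dataclass
class RunState:
    reasoning_content: str = ""
    events: List[dict] = field(default_factory=list)

def emit_step(state: RunState, step: ReasoningStep) -> dict:
    # Fold the new step into the running transcript, then emit an event that
    # carries both the step and the full reasoning_content so far.
    heading = f"{step.title}: " if step.title else ""
    state.reasoning_content += f"{heading}{step.result}\n"
    event = {"event": "ReasoningStep", "reasoning_step": step,
             "reasoning_content": state.reasoning_content}
    state.events.append(event)
    return event

state = RunState()
emit_step(state, ReasoningStep(result="break the task into sub-goals", title="Plan"))
emit_step(state, ReasoningStep(result="call the search tool", title="Act"))
print(state.reasoning_content)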
@@ -8071,22 +8414,28 @@ class Agent:
|
|
|
8071
8414
|
)
|
|
8072
8415
|
|
|
8073
8416
|
# Yield the final reasoning completed event
|
|
8074
|
-
if
|
|
8075
|
-
yield
|
|
8417
|
+
if stream_events:
|
|
8418
|
+
yield handle_event( # type: ignore
|
|
8076
8419
|
create_reasoning_completed_event(
|
|
8077
8420
|
from_run_response=run_response,
|
|
8078
8421
|
content=ReasoningSteps(reasoning_steps=all_reasoning_steps),
|
|
8079
8422
|
content_type=ReasoningSteps.__name__,
|
|
8080
8423
|
),
|
|
8081
8424
|
run_response,
|
|
8425
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
8426
|
+
store_events=self.store_events,
|
|
8082
8427
|
)
|
|
8083
8428
|
|
|
8084
|
-
async def _areason(
|
|
8429
|
+
async def _areason(
|
|
8430
|
+
self, run_response: RunOutput, run_messages: RunMessages, stream_events: Optional[bool] = None
|
|
8431
|
+
) -> Any:
|
|
8085
8432
|
# Yield a reasoning started event
|
|
8086
|
-
if
|
|
8087
|
-
yield
|
|
8433
|
+
if stream_events:
|
|
8434
|
+
yield handle_event( # type: ignore
|
|
8088
8435
|
create_reasoning_started_event(from_run_response=run_response),
|
|
8089
8436
|
run_response,
|
|
8437
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
8438
|
+
store_events=self.store_events,
|
|
8090
8439
|
)
|
|
8091
8440
|
|
|
8092
8441
|
use_default_reasoning = False
|
|
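Taken together, these hunks mean reasoning events only reach the caller when streaming is on and stream_events is requested; otherwise the reasoning still runs but stays silent. A hedged usage sketch follows: the attribute and parameter names (reasoning, store_events, stream, stream_events) all appear in this diff, but the constructor details and the model id are illustrative and should be checked against the 2.2.x docs.

from agno.agent import Agent
from agno.models.anthropic import Claude

agent = Agent(
    model=Claude(id="claude-sonnet-4-0"),   # illustrative model id
    reasoning=True,                         # enable the reasoning pass
    store_events=True,                      # also keep emitted events on the RunOutput
)

# Reasoning started / step / completed events are only yielded because both
# stream and stream_events are set; with stream_events=False the same
# reasoning happens without any events being streamed.
agent.print_response("Plan a 3-day trip to Kyoto", stream=True, stream_events=True)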
@@ -8218,14 +8567,16 @@ class Agent:
|
|
|
8218
8567
|
reasoning_steps=[ReasoningStep(result=reasoning_message.content)],
|
|
8219
8568
|
reasoning_agent_messages=[reasoning_message],
|
|
8220
8569
|
)
|
|
8221
|
-
if
|
|
8222
|
-
yield
|
|
8570
|
+
if stream_events:
|
|
8571
|
+
yield handle_event(
|
|
8223
8572
|
create_reasoning_completed_event(
|
|
8224
8573
|
from_run_response=run_response,
|
|
8225
8574
|
content=ReasoningSteps(reasoning_steps=[ReasoningStep(result=reasoning_message.content)]),
|
|
8226
8575
|
content_type=ReasoningSteps.__name__,
|
|
8227
8576
|
),
|
|
8228
8577
|
run_response,
|
|
8578
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
8579
|
+
store_events=self.store_events,
|
|
8229
8580
|
)
|
|
8230
8581
|
else:
|
|
8231
8582
|
log_warning(
|
|
@@ -8304,7 +8655,7 @@ class Agent:
|
|
|
8304
8655
|
reasoning_steps: List[ReasoningStep] = reasoning_agent_response.content.reasoning_steps
|
|
8305
8656
|
all_reasoning_steps.extend(reasoning_steps)
|
|
8306
8657
|
# Yield reasoning steps
|
|
8307
|
-
if
|
|
8658
|
+
if stream_events:
|
|
8308
8659
|
for reasoning_step in reasoning_steps:
|
|
8309
8660
|
updated_reasoning_content = self._format_reasoning_step_content(
|
|
8310
8661
|
run_response=run_response,
|
|
@@ -8312,13 +8663,15 @@ class Agent:
|
|
|
8312
8663
|
)
|
|
8313
8664
|
|
|
8314
8665
|
# Yield the response with the updated reasoning_content
|
|
8315
|
-
yield
|
|
8666
|
+
yield handle_event(
|
|
8316
8667
|
create_reasoning_step_event(
|
|
8317
8668
|
from_run_response=run_response,
|
|
8318
8669
|
reasoning_step=reasoning_step,
|
|
8319
8670
|
reasoning_content=updated_reasoning_content,
|
|
8320
8671
|
),
|
|
8321
8672
|
run_response,
|
|
8673
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
8674
|
+
store_events=self.store_events,
|
|
8322
8675
|
)
|
|
8323
8676
|
|
|
8324
8677
|
# Find the index of the first assistant message
|
|
@@ -8354,14 +8707,16 @@ class Agent:
|
|
|
8354
8707
|
)
|
|
8355
8708
|
|
|
8356
8709
|
# Yield the final reasoning completed event
|
|
8357
|
-
if
|
|
8358
|
-
yield
|
|
8710
|
+
if stream_events:
|
|
8711
|
+
yield handle_event(
|
|
8359
8712
|
create_reasoning_completed_event(
|
|
8360
8713
|
from_run_response=run_response,
|
|
8361
8714
|
content=ReasoningSteps(reasoning_steps=all_reasoning_steps),
|
|
8362
8715
|
content_type=ReasoningSteps.__name__,
|
|
8363
8716
|
),
|
|
8364
8717
|
run_response,
|
|
8718
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
8719
|
+
store_events=self.store_events,
|
|
8365
8720
|
)
|
|
8366
8721
|
|
|
8367
8722
|
def _process_parser_response(
|
|
@@ -8430,18 +8785,17 @@ class Agent:
|
|
|
8430
8785
|
log_warning("A response model is required to parse the response with a parser model")
|
|
8431
8786
|
|
|
8432
8787
|
def _parse_response_with_parser_model_stream(
|
|
8433
|
-
self,
|
|
8434
|
-
session: AgentSession,
|
|
8435
|
-
run_response: RunOutput,
|
|
8436
|
-
stream_intermediate_steps: bool = True,
|
|
8788
|
+
self, session: AgentSession, run_response: RunOutput, stream_events: bool = True
|
|
8437
8789
|
):
|
|
8438
8790
|
"""Parse the model response using the parser model"""
|
|
8439
8791
|
if self.parser_model is not None:
|
|
8440
8792
|
if self.output_schema is not None:
|
|
8441
|
-
if
|
|
8442
|
-
yield
|
|
8793
|
+
if stream_events:
|
|
8794
|
+
yield handle_event(
|
|
8443
8795
|
create_parser_model_response_started_event(run_response),
|
|
8444
8796
|
run_response,
|
|
8797
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
8798
|
+
store_events=self.store_events,
|
|
8445
8799
|
)
|
|
8446
8800
|
|
|
8447
8801
|
parser_model_response = ModelResponse(content="")
|
|
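The parser-model path keeps its behaviour but swaps the old stream_intermediate_steps argument for stream_events, and its started/completed events now flow through handle_event with the same skip/store settings. Functionally, a parser model is only consulted when an output_schema is set: the main model answers freely and the (usually cheaper) parser model converts that answer into the schema. A hedged usage sketch; parser_model and output_schema are attribute names taken from this hunk, while the model classes, ids and the shape of run.content are assumptions to verify against the docs.

from pydantic import BaseModel

from agno.agent import Agent
from agno.models.openai import OpenAIChat

class MovieScript(BaseModel):
    title: str
    genre: str
    logline: str

agent = Agent(
    model=OpenAIChat(id="gpt-4o"),
    parser_model=OpenAIChat(id="gpt-4o-mini"),  # smaller model used only for parsing
    output_schema=MovieScript,
)

run = agent.run("Write a one-line movie pitch about a lighthouse keeper")
print(run.content)  # expected to be a MovieScript instance when parsing succeeds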
@@ -8460,7 +8814,7 @@ class Agent:
|
|
|
8460
8814
|
model_response=parser_model_response,
|
|
8461
8815
|
model_response_event=model_response_event,
|
|
8462
8816
|
parse_structured_output=True,
|
|
8463
|
-
|
|
8817
|
+
stream_events=stream_events,
|
|
8464
8818
|
)
|
|
8465
8819
|
|
|
8466
8820
|
parser_model_response_message: Optional[Message] = None
|
|
@@ -8474,28 +8828,29 @@ class Agent:
|
|
|
8474
8828
|
else:
|
|
8475
8829
|
log_warning("Unable to parse response with parser model")
|
|
8476
8830
|
|
|
8477
|
-
if
|
|
8478
|
-
yield
|
|
8831
|
+
if stream_events:
|
|
8832
|
+
yield handle_event(
|
|
8479
8833
|
create_parser_model_response_completed_event(run_response),
|
|
8480
8834
|
run_response,
|
|
8835
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
8836
|
+
store_events=self.store_events,
|
|
8481
8837
|
)
|
|
8482
8838
|
|
|
8483
8839
|
else:
|
|
8484
8840
|
log_warning("A response model is required to parse the response with a parser model")
|
|
8485
8841
|
|
|
8486
8842
|
async def _aparse_response_with_parser_model_stream(
|
|
8487
|
-
self,
|
|
8488
|
-
session: AgentSession,
|
|
8489
|
-
run_response: RunOutput,
|
|
8490
|
-
stream_intermediate_steps: bool = True,
|
|
8843
|
+
self, session: AgentSession, run_response: RunOutput, stream_events: bool = True
|
|
8491
8844
|
):
|
|
8492
8845
|
"""Parse the model response using the parser model stream."""
|
|
8493
8846
|
if self.parser_model is not None:
|
|
8494
8847
|
if self.output_schema is not None:
|
|
8495
|
-
if
|
|
8496
|
-
yield
|
|
8848
|
+
if stream_events:
|
|
8849
|
+
yield handle_event(
|
|
8497
8850
|
create_parser_model_response_started_event(run_response),
|
|
8498
8851
|
run_response,
|
|
8852
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
8853
|
+
store_events=self.store_events,
|
|
8499
8854
|
)
|
|
8500
8855
|
|
|
8501
8856
|
parser_model_response = ModelResponse(content="")
|
|
@@ -8515,7 +8870,7 @@ class Agent:
|
|
|
8515
8870
|
model_response=parser_model_response,
|
|
8516
8871
|
model_response_event=model_response_event,
|
|
8517
8872
|
parse_structured_output=True,
|
|
8518
|
-
|
|
8873
|
+
stream_events=stream_events,
|
|
8519
8874
|
):
|
|
8520
8875
|
yield event
|
|
8521
8876
|
|
|
@@ -8530,10 +8885,12 @@ class Agent:
|
|
|
8530
8885
|
else:
|
|
8531
8886
|
log_warning("Unable to parse response with parser model")
|
|
8532
8887
|
|
|
8533
|
-
if
|
|
8534
|
-
yield
|
|
8888
|
+
if stream_events:
|
|
8889
|
+
yield handle_event(
|
|
8535
8890
|
create_parser_model_response_completed_event(run_response),
|
|
8536
8891
|
run_response,
|
|
8892
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
8893
|
+
store_events=self.store_events,
|
|
8537
8894
|
)
|
|
8538
8895
|
else:
|
|
8539
8896
|
log_warning("A response model is required to parse the response with a parser model")
|
|
@@ -8552,7 +8909,7 @@ class Agent:
|
|
|
8552
8909
|
session: AgentSession,
|
|
8553
8910
|
run_response: RunOutput,
|
|
8554
8911
|
run_messages: RunMessages,
|
|
8555
|
-
|
|
8912
|
+
stream_events: bool = False,
|
|
8556
8913
|
):
|
|
8557
8914
|
"""Parse the model response using the output model."""
|
|
8558
8915
|
from agno.utils.events import (
|
|
@@ -8563,8 +8920,13 @@ class Agent:
|
|
|
8563
8920
|
if self.output_model is None:
|
|
8564
8921
|
return
|
|
8565
8922
|
|
|
8566
|
-
if
|
|
8567
|
-
yield
|
|
8923
|
+
if stream_events:
|
|
8924
|
+
yield handle_event(
|
|
8925
|
+
create_output_model_response_started_event(run_response),
|
|
8926
|
+
run_response,
|
|
8927
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
8928
|
+
store_events=self.store_events,
|
|
8929
|
+
)
|
|
8568
8930
|
|
|
8569
8931
|
messages_for_output_model = self._get_messages_for_output_model(run_messages.messages)
|
|
8570
8932
|
|
|
@@ -8576,11 +8938,16 @@ class Agent:
|
|
|
8576
8938
|
run_response=run_response,
|
|
8577
8939
|
model_response=model_response,
|
|
8578
8940
|
model_response_event=model_response_event,
|
|
8579
|
-
|
|
8941
|
+
stream_events=stream_events,
|
|
8580
8942
|
)
|
|
8581
8943
|
|
|
8582
|
-
if
|
|
8583
|
-
yield
|
|
8944
|
+
if stream_events:
|
|
8945
|
+
yield handle_event(
|
|
8946
|
+
create_output_model_response_completed_event(run_response),
|
|
8947
|
+
run_response,
|
|
8948
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
8949
|
+
store_events=self.store_events,
|
|
8950
|
+
)
|
|
8584
8951
|
|
|
8585
8952
|
# Build a list of messages that should be added to the RunResponse
|
|
8586
8953
|
messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
|
|
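The output-model path gets the same treatment: _parse_response_with_output_model now takes stream_events and wraps its started/completed events in handle_event. Conceptually this is a two-pass flow: the primary model (plus tools and reasoning) produces the working messages, and the configured output_model writes the final response from them. A standalone restatement of that shape, with plain callables standing in for the real model calls:

from typing import Callable, Dict, List, Tuple

def run_with_output_model(
    messages: List[Dict],
    primary: Callable[[List[Dict]], Dict],
    output_model: Callable[[List[Dict]], Dict],
    stream_events: bool = False,
) -> Tuple[Dict, List[Dict]]:
    events: List[Dict] = []
    draft = primary(messages)                  # first pass: tools, reasoning, draft answer
    if stream_events:
        events.append({"event": "OutputModelResponseStarted"})
    final = output_model(messages + [draft])   # second pass: polished final response
    if stream_events:
        events.append({"event": "OutputModelResponseCompleted"})
    return final, events

final, events = run_with_output_model(
    [{"role": "user", "content": "Summarize the report"}],
    primary=lambda msgs: {"role": "assistant", "content": "rough draft"},
    output_model=lambda msgs: {"role": "assistant", "content": "Final summary."},
    stream_events=True,
)
print(final["content"], [e["event"] for e in events])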
@@ -8603,7 +8970,7 @@ class Agent:
|
|
|
8603
8970
|
session: AgentSession,
|
|
8604
8971
|
run_response: RunOutput,
|
|
8605
8972
|
run_messages: RunMessages,
|
|
8606
|
-
|
|
8973
|
+
stream_events: bool = False,
|
|
8607
8974
|
):
|
|
8608
8975
|
"""Parse the model response using the output model."""
|
|
8609
8976
|
from agno.utils.events import (
|
|
@@ -8614,8 +8981,13 @@ class Agent:
|
|
|
8614
8981
|
if self.output_model is None:
|
|
8615
8982
|
return
|
|
8616
8983
|
|
|
8617
|
-
if
|
|
8618
|
-
yield
|
|
8984
|
+
if stream_events:
|
|
8985
|
+
yield handle_event(
|
|
8986
|
+
create_output_model_response_started_event(run_response),
|
|
8987
|
+
run_response,
|
|
8988
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
8989
|
+
store_events=self.store_events,
|
|
8990
|
+
)
|
|
8619
8991
|
|
|
8620
8992
|
messages_for_output_model = self._get_messages_for_output_model(run_messages.messages)
|
|
8621
8993
|
|
|
@@ -8629,12 +9001,17 @@ class Agent:
|
|
|
8629
9001
|
run_response=run_response,
|
|
8630
9002
|
model_response=model_response,
|
|
8631
9003
|
model_response_event=model_response_event,
|
|
8632
|
-
|
|
9004
|
+
stream_events=stream_events,
|
|
8633
9005
|
):
|
|
8634
9006
|
yield event
|
|
8635
9007
|
|
|
8636
|
-
if
|
|
8637
|
-
yield
|
|
9008
|
+
if stream_events:
|
|
9009
|
+
yield handle_event(
|
|
9010
|
+
create_output_model_response_completed_event(run_response),
|
|
9011
|
+
run_response,
|
|
9012
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
9013
|
+
store_events=self.store_events,
|
|
9014
|
+
)
|
|
8638
9015
|
|
|
8639
9016
|
# Build a list of messages that should be added to the RunResponse
|
|
8640
9017
|
messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
|
|
@@ -8643,15 +9020,6 @@ class Agent:
|
|
|
8643
9020
|
# Update the RunResponse metrics
|
|
8644
9021
|
run_response.metrics = self._calculate_run_metrics(messages_for_run_response)
|
|
8645
9022
|
|
|
8646
|
-
def _handle_event(self, event: RunOutputEvent, run_response: RunOutput):
|
|
8647
|
-
# We only store events that are not run_response_content events
|
|
8648
|
-
events_to_skip = [event.value for event in self.events_to_skip] if self.events_to_skip else []
|
|
8649
|
-
if self.store_events and event.event not in events_to_skip:
|
|
8650
|
-
if run_response.events is None:
|
|
8651
|
-
run_response.events = []
|
|
8652
|
-
run_response.events.append(event)
|
|
8653
|
-
return event
|
|
8654
|
-
|
|
8655
9023
|
###########################################################################
|
|
8656
9024
|
# Default Tools
|
|
8657
9025
|
###########################################################################
|
|
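This hunk deletes Agent._handle_event; the same responsibility (drop event types listed in events_to_skip, append the rest to run_response.events when store_events is set, and hand the event back to the caller) now sits in the shared handle_event imported at the top of the file and passed the agent's settings at every call site. The free function below restates the deleted method's logic with stand-in types; the real helper in agno.utils.events may differ in signature.

from dataclasses import dataclass
from enum import Enum
from typing import List, Optional

class RunEvent(Enum):               # stand-in for the enum used in events_to_skip
    reasoning_step = "ReasoningStep"

@dataclass
class StubEvent:
    event: str

@dataclass
class StubRunOutput:
    events: Optional[List[StubEvent]] = None

def handle_event(event, run_response, events_to_skip=None, store_events=False):
    # Mirrors the deleted Agent._handle_event: compare against enum values,
    # store the event on the run output if requested, always return it.
    skip = [e.value for e in events_to_skip] if events_to_skip else []
    if store_events and event.event not in skip:
        if run_response.events is None:
            run_response.events = []
        run_response.events.append(event)
    return event

out = StubRunOutput()
handle_event(StubEvent("ReasoningStarted"), out, [RunEvent.reasoning_step], store_events=True)
handle_event(StubEvent("ReasoningStep"), out, [RunEvent.reasoning_step], store_events=True)
print([e.event for e in out.events])   # only ReasoningStarted was stored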
@@ -8779,7 +9147,7 @@ class Agent:
|
|
|
8779
9147
|
|
|
8780
9148
|
return get_tool_call_history
|
|
8781
9149
|
|
|
8782
|
-
def
|
|
9150
|
+
def _update_session_state_tool(self, session_state, session_state_updates: dict) -> str:
|
|
8783
9151
|
"""
|
|
8784
9152
|
Update the shared session state. Provide any updates as a dictionary of key-value pairs.
|
|
8785
9153
|
Example:
|
|
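The hunk above gives the session-state tool an explicit name and signature: _update_session_state_tool(self, session_state, session_state_updates: dict) -> str, documented as taking updates as a dictionary of key-value pairs. Its body falls outside the visible part of the hunk, so the following is only a guess at the merge it presumably performs, written as a standalone function:

from typing import Any, Dict

def update_session_state(session_state: Dict[str, Any], session_state_updates: Dict[str, Any]) -> str:
    """Merge key-value updates into the shared session state and report what changed."""
    session_state.update(session_state_updates)
    return f"Updated session state with: {session_state_updates}"

state: Dict[str, Any] = {"cart": []}
print(update_session_state(state, {"cart": ["apples"], "currency": "EUR"}))
print(state)   # {'cart': ['apples'], 'currency': 'EUR'}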
@@ -9125,6 +9493,7 @@ class Agent:
|
|
|
9125
9493
|
videos: Optional[Sequence[Video]] = None,
|
|
9126
9494
|
files: Optional[Sequence[File]] = None,
|
|
9127
9495
|
stream: Optional[bool] = None,
|
|
9496
|
+
stream_events: Optional[bool] = None,
|
|
9128
9497
|
stream_intermediate_steps: Optional[bool] = None,
|
|
9129
9498
|
markdown: Optional[bool] = None,
|
|
9130
9499
|
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
@@ -9156,11 +9525,19 @@ class Agent:
|
|
|
9156
9525
|
if self.output_schema is not None:
|
|
9157
9526
|
markdown = False
|
|
9158
9527
|
|
|
9528
|
+
# Use stream override value when necessary
|
|
9159
9529
|
if stream is None:
|
|
9160
|
-
stream = self.stream
|
|
9530
|
+
stream = False if self.stream is None else self.stream
|
|
9531
|
+
|
|
9532
|
+
# Considering both stream_events and stream_intermediate_steps (deprecated)
|
|
9533
|
+
stream_events = stream_events or stream_intermediate_steps
|
|
9534
|
+
|
|
9535
|
+
# Can't stream events if streaming is disabled
|
|
9536
|
+
if stream is False:
|
|
9537
|
+
stream_events = False
|
|
9161
9538
|
|
|
9162
|
-
if
|
|
9163
|
-
|
|
9539
|
+
if stream_events is None:
|
|
9540
|
+
stream_events = False if self.stream_events is None else self.stream_events
|
|
9164
9541
|
|
|
9165
9542
|
if stream:
|
|
9166
9543
|
print_response_stream(
|
|
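print_response now resolves its streaming flags in a fixed order: an explicit stream argument wins, otherwise the agent's stream attribute (defaulting to False); the deprecated stream_intermediate_steps still counts as asking for events; event streaming is forced off whenever streaming itself is off; and only then does the agent's stream_events attribute fill in a missing value. The same precedence, restated as a standalone helper for readability (not an agno API):

from typing import Optional, Tuple

def resolve_stream_flags(
    stream: Optional[bool],
    stream_events: Optional[bool],
    stream_intermediate_steps: Optional[bool],
    agent_stream: Optional[bool],
    agent_stream_events: Optional[bool],
) -> Tuple[bool, bool]:
    if stream is None:
        stream = False if agent_stream is None else agent_stream
    # The deprecated flag still counts as a request for events.
    stream_events = stream_events or stream_intermediate_steps
    # Events can only be streamed when streaming itself is enabled.
    if stream is False:
        stream_events = False
    if stream_events is None:
        stream_events = False if agent_stream_events is None else agent_stream_events
    return stream, stream_events

print(resolve_stream_flags(None, None, True, agent_stream=True, agent_stream_events=None))    # (True, True)
print(resolve_stream_flags(False, True, None, agent_stream=True, agent_stream_events=True))   # (False, False)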
@@ -9173,7 +9550,7 @@ class Agent:
|
|
|
9173
9550
|
images=images,
|
|
9174
9551
|
videos=videos,
|
|
9175
9552
|
files=files,
|
|
9176
|
-
|
|
9553
|
+
stream_events=stream_events,
|
|
9177
9554
|
knowledge_filters=knowledge_filters,
|
|
9178
9555
|
debug_mode=debug_mode,
|
|
9179
9556
|
markdown=markdown,
|
|
@@ -9201,7 +9578,7 @@ class Agent:
|
|
|
9201
9578
|
images=images,
|
|
9202
9579
|
videos=videos,
|
|
9203
9580
|
files=files,
|
|
9204
|
-
|
|
9581
|
+
stream_events=stream_events,
|
|
9205
9582
|
knowledge_filters=knowledge_filters,
|
|
9206
9583
|
debug_mode=debug_mode,
|
|
9207
9584
|
markdown=markdown,
|
|
@@ -9230,6 +9607,7 @@ class Agent:
|
|
|
9230
9607
|
videos: Optional[Sequence[Video]] = None,
|
|
9231
9608
|
files: Optional[Sequence[File]] = None,
|
|
9232
9609
|
stream: Optional[bool] = None,
|
|
9610
|
+
stream_events: Optional[bool] = None,
|
|
9233
9611
|
stream_intermediate_steps: Optional[bool] = None,
|
|
9234
9612
|
markdown: Optional[bool] = None,
|
|
9235
9613
|
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
@@ -9259,8 +9637,15 @@ class Agent:
|
|
|
9259
9637
|
if stream is None:
|
|
9260
9638
|
stream = self.stream or False
|
|
9261
9639
|
|
|
9262
|
-
|
|
9263
|
-
|
|
9640
|
+
# Considering both stream_events and stream_intermediate_steps (deprecated)
|
|
9641
|
+
stream_events = stream_events or stream_intermediate_steps
|
|
9642
|
+
|
|
9643
|
+
# Can't stream events if streaming is disabled
|
|
9644
|
+
if stream is False:
|
|
9645
|
+
stream_events = False
|
|
9646
|
+
|
|
9647
|
+
if stream_events is None:
|
|
9648
|
+
stream_events = False if self.stream_events is None else self.stream_events
|
|
9264
9649
|
|
|
9265
9650
|
if stream:
|
|
9266
9651
|
await aprint_response_stream(
|
|
@@ -9273,7 +9658,7 @@ class Agent:
|
|
|
9273
9658
|
images=images,
|
|
9274
9659
|
videos=videos,
|
|
9275
9660
|
files=files,
|
|
9276
|
-
|
|
9661
|
+
stream_events=stream_events,
|
|
9277
9662
|
knowledge_filters=knowledge_filters,
|
|
9278
9663
|
debug_mode=debug_mode,
|
|
9279
9664
|
markdown=markdown,
|
|
@@ -9300,7 +9685,6 @@ class Agent:
|
|
|
9300
9685
|
images=images,
|
|
9301
9686
|
videos=videos,
|
|
9302
9687
|
files=files,
|
|
9303
|
-
stream_intermediate_steps=stream_intermediate_steps,
|
|
9304
9688
|
knowledge_filters=knowledge_filters,
|
|
9305
9689
|
debug_mode=debug_mode,
|
|
9306
9690
|
markdown=markdown,
|
|
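The hunk below removes the agent's inline media/tool/history scrubbing (now imported from agno.utils.agent as scrub_media_from_run_output, scrub_tool_results_from_run_output and scrub_history_messages_from_run_output) and consolidates the end-of-run work into _cleanup_and_store / _acleanup_and_store: scrub according to the storage flags, stop the run timer, optionally save the response to a file, upsert the run onto the session, refresh session metrics, and save the session. A compact restatement of that ordering, with dicts standing in for the real Agent, RunOutput and AgentSession objects:

def cleanup_and_store(run: dict, session: dict, flags: dict) -> None:
    # 1. Scrub the stored run based on the storage flags.
    if not flags["store_media"]:
        run["media"] = None
    if not flags["store_tool_messages"]:
        run["tool_messages"] = None
    if not flags["store_history_messages"]:
        run["history_messages"] = None
    # 2. Stop the timer for the run duration.
    run["duration_timer"] = "stopped"
    # 3. Optionally save the output to a file (save_run_response_to_file).
    # 4. Persist: add the run to the session, update session metrics, save the session.
    session["runs"].append(run)

session = {"runs": []}
cleanup_and_store(
    {"media": ["image"], "tool_messages": [], "history_messages": []},
    session,
    {"store_media": False, "store_tool_messages": True, "store_history_messages": True},
)
print(session["runs"])   # media scrubbed, run stored on the session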
@@ -9436,108 +9820,70 @@ class Agent:
|
|
|
9436
9820
|
|
|
9437
9821
|
return effective_filters
|
|
9438
9822
|
|
|
9439
|
-
def
|
|
9440
|
-
|
|
9441
|
-
|
|
9442
|
-
This includes media in input, output artifacts, and all messages.
|
|
9443
|
-
"""
|
|
9444
|
-
# 1. Scrub RunInput media
|
|
9445
|
-
if run_response.input is not None:
|
|
9446
|
-
run_response.input.images = []
|
|
9447
|
-
run_response.input.videos = []
|
|
9448
|
-
run_response.input.audios = []
|
|
9449
|
-
run_response.input.files = []
|
|
9450
|
-
|
|
9451
|
-
# 3. Scrub media from all messages
|
|
9452
|
-
if run_response.messages:
|
|
9453
|
-
for message in run_response.messages:
|
|
9454
|
-
self._scrub_media_from_message(message)
|
|
9455
|
-
|
|
9456
|
-
# 4. Scrub media from additional_input messages if any
|
|
9457
|
-
if run_response.additional_input:
|
|
9458
|
-
for message in run_response.additional_input:
|
|
9459
|
-
self._scrub_media_from_message(message)
|
|
9460
|
-
|
|
9461
|
-
# 5. Scrub media from reasoning_messages if any
|
|
9462
|
-
if run_response.reasoning_messages:
|
|
9463
|
-
for message in run_response.reasoning_messages:
|
|
9464
|
-
self._scrub_media_from_message(message)
|
|
9465
|
-
|
|
9466
|
-
def _scrub_media_from_message(self, message: Message) -> None:
|
|
9467
|
-
"""Remove all media from a Message object."""
|
|
9468
|
-
# Input media
|
|
9469
|
-
message.images = None
|
|
9470
|
-
message.videos = None
|
|
9471
|
-
message.audio = None
|
|
9472
|
-
message.files = None
|
|
9473
|
-
|
|
9474
|
-
# Output media
|
|
9475
|
-
message.audio_output = None
|
|
9476
|
-
message.image_output = None
|
|
9477
|
-
message.video_output = None
|
|
9478
|
-
|
|
9479
|
-
def _scrub_tool_results_from_run_output(self, run_response: RunOutput) -> None:
|
|
9480
|
-
"""
|
|
9481
|
-
Remove all tool-related data from RunOutput when store_tool_messages=False.
|
|
9482
|
-
This removes both the tool call and its corresponding result to maintain API consistency.
|
|
9483
|
-
"""
|
|
9484
|
-
if not run_response.messages:
|
|
9485
|
-
return
|
|
9823
|
+
def _cleanup_and_store(self, run_response: RunOutput, session: AgentSession, user_id: Optional[str] = None) -> None:
|
|
9824
|
+
# Scrub the stored run based on storage flags
|
|
9825
|
+
self._scrub_run_output_for_storage(run_response)
|
|
9486
9826
|
|
|
9487
|
-
#
|
|
9488
|
-
|
|
9489
|
-
|
|
9490
|
-
if message.role == "tool" and message.tool_call_id:
|
|
9491
|
-
tool_call_ids_to_remove.add(message.tool_call_id)
|
|
9492
|
-
|
|
9493
|
-
# Step 2: Remove tool result messages (role="tool")
|
|
9494
|
-
run_response.messages = [msg for msg in run_response.messages if msg.role != "tool"]
|
|
9495
|
-
|
|
9496
|
-
# Step 3: Remove the assistant messages related to the scrubbed tool calls
|
|
9497
|
-
filtered_messages = []
|
|
9498
|
-
for message in run_response.messages:
|
|
9499
|
-
# Check if this assistant message made any of the tool calls we're removing
|
|
9500
|
-
should_remove = False
|
|
9501
|
-
if message.role == "assistant" and message.tool_calls:
|
|
9502
|
-
for tool_call in message.tool_calls:
|
|
9503
|
-
if tool_call.get("id") in tool_call_ids_to_remove:
|
|
9504
|
-
should_remove = True
|
|
9505
|
-
break
|
|
9827
|
+
# Stop the timer for the Run duration
|
|
9828
|
+
if run_response.metrics:
|
|
9829
|
+
run_response.metrics.stop_timer()
|
|
9506
9830
|
|
|
9507
|
-
|
|
9508
|
-
|
|
9831
|
+
# Optional: Save output to file if save_response_to_file is set
|
|
9832
|
+
self.save_run_response_to_file(
|
|
9833
|
+
run_response=run_response,
|
|
9834
|
+
input=run_response.input.input_content_string() if run_response.input else "",
|
|
9835
|
+
session_id=session.session_id,
|
|
9836
|
+
user_id=user_id,
|
|
9837
|
+
)
|
|
9509
9838
|
|
|
9510
|
-
|
|
9839
|
+
# Add RunOutput to Agent Session
|
|
9840
|
+
session.upsert_run(run=run_response)
|
|
9511
9841
|
|
|
9512
|
-
|
|
9513
|
-
|
|
9514
|
-
|
|
9515
|
-
|
|
9516
|
-
|
|
9517
|
-
|
|
9518
|
-
|
|
9519
|
-
|
|
9842
|
+
# Calculate session metrics
|
|
9843
|
+
self._update_session_metrics(session=session, run_response=run_response)
|
|
9844
|
+
|
|
9845
|
+
# Save session to memory
|
|
9846
|
+
self.save_session(session=session)
|
|
9847
|
+
|
|
9848
|
+
async def _acleanup_and_store(
|
|
9849
|
+
self, run_response: RunOutput, session: AgentSession, user_id: Optional[str] = None
|
|
9850
|
+
) -> None:
|
|
9851
|
+
# Scrub the stored run based on storage flags
|
|
9852
|
+
self._scrub_run_output_for_storage(run_response)
|
|
9853
|
+
|
|
9854
|
+
# Stop the timer for the Run duration
|
|
9855
|
+
if run_response.metrics:
|
|
9856
|
+
run_response.metrics.stop_timer()
|
|
9857
|
+
|
|
9858
|
+
# Optional: Save output to file if save_response_to_file is set
|
|
9859
|
+
self.save_run_response_to_file(
|
|
9860
|
+
run_response=run_response,
|
|
9861
|
+
input=run_response.input.input_content_string() if run_response.input else "",
|
|
9862
|
+
session_id=session.session_id,
|
|
9863
|
+
user_id=user_id,
|
|
9864
|
+
)
|
|
9520
9865
|
|
|
9521
|
-
|
|
9866
|
+
# Add RunOutput to Agent Session
|
|
9867
|
+
session.upsert_run(run=run_response)
|
|
9868
|
+
|
|
9869
|
+
# Calculate session metrics
|
|
9870
|
+
self._update_session_metrics(session=session, run_response=run_response)
|
|
9871
|
+
|
|
9872
|
+
# Save session to storage
|
|
9873
|
+
await self.asave_session(session=session)
|
|
9874
|
+
|
|
9875
|
+
def _scrub_run_output_for_storage(self, run_response: RunOutput) -> None:
|
|
9522
9876
|
"""
|
|
9523
9877
|
Scrub run output based on storage flags before persisting to database.
|
|
9524
|
-
Returns True if any scrubbing was done, False otherwise.
|
|
9525
9878
|
"""
|
|
9526
|
-
scrubbed = False
|
|
9527
|
-
|
|
9528
9879
|
if not self.store_media:
|
|
9529
|
-
|
|
9530
|
-
scrubbed = True
|
|
9880
|
+
scrub_media_from_run_output(run_response)
|
|
9531
9881
|
|
|
9532
9882
|
if not self.store_tool_messages:
|
|
9533
|
-
|
|
9534
|
-
scrubbed = True
|
|
9883
|
+
scrub_tool_results_from_run_output(run_response)
|
|
9535
9884
|
|
|
9536
9885
|
if not self.store_history_messages:
|
|
9537
|
-
|
|
9538
|
-
scrubbed = True
|
|
9539
|
-
|
|
9540
|
-
return scrubbed
|
|
9886
|
+
scrub_history_messages_from_run_output(run_response)
|
|
9541
9887
|
|
|
9542
9888
|
def _validate_media_object_id(
|
|
9543
9889
|
self,
|