agno 2.1.10__py3-none-any.whl → 2.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +1578 -1247
- agno/models/anthropic/claude.py +2 -2
- agno/models/ollama/chat.py +7 -2
- agno/os/app.py +1 -1
- agno/os/interfaces/a2a/router.py +2 -2
- agno/os/interfaces/agui/router.py +2 -2
- agno/os/router.py +7 -7
- agno/os/routers/evals/schemas.py +31 -31
- agno/os/routers/health.py +6 -2
- agno/os/routers/knowledge/schemas.py +49 -47
- agno/os/routers/memory/schemas.py +16 -16
- agno/os/routers/metrics/schemas.py +16 -16
- agno/os/routers/session/session.py +382 -7
- agno/os/schema.py +254 -231
- agno/os/utils.py +1 -1
- agno/run/agent.py +49 -1
- agno/run/team.py +43 -0
- agno/session/summary.py +45 -13
- agno/session/team.py +90 -5
- agno/team/team.py +1117 -856
- agno/utils/agent.py +372 -0
- agno/utils/events.py +144 -2
- agno/utils/print_response/agent.py +10 -6
- agno/utils/print_response/team.py +6 -4
- agno/utils/print_response/workflow.py +7 -5
- agno/utils/team.py +9 -8
- agno/workflow/condition.py +17 -9
- agno/workflow/loop.py +18 -10
- agno/workflow/parallel.py +14 -6
- agno/workflow/router.py +16 -8
- agno/workflow/step.py +14 -6
- agno/workflow/steps.py +14 -6
- agno/workflow/workflow.py +245 -122
- {agno-2.1.10.dist-info → agno-2.2.0.dist-info}/METADATA +60 -23
- {agno-2.1.10.dist-info → agno-2.2.0.dist-info}/RECORD +38 -37
- {agno-2.1.10.dist-info → agno-2.2.0.dist-info}/WHEEL +0 -0
- {agno-2.1.10.dist-info → agno-2.2.0.dist-info}/licenses/LICENSE +0 -0
- {agno-2.1.10.dist-info → agno-2.2.0.dist-info}/top_level.txt +0 -0
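Reading the agent.py diff below, the most visible API change in 2.2.0 is that `stream_events` replaces `stream_intermediate_steps` (the old flag is kept but deprecated, and the two are OR-ed together). A minimal usage sketch under that assumption — the model class and prompt here are illustrative, not taken from this diff:

```python
from agno.agent import Agent
from agno.models.openai import OpenAIChat  # illustrative; any supported model class works

agent = Agent(model=OpenAIChat(id="gpt-4o-mini"))

# 2.2.0 style: stream_events controls whether intermediate events are yielded
for event in agent.run("Summarize this release", stream=True, stream_events=True):
    print(type(event).__name__)

# 2.1.x style is still accepted for backwards compatibility:
# agent.run("Summarize this release", stream=True, stream_intermediate_steps=True)
```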
agno/agent/agent.py
CHANGED
@@ -64,15 +64,28 @@ from agno.run.cancel import (
 )
 from agno.run.messages import RunMessages
 from agno.run.team import TeamRunOutputEvent
-from agno.session import AgentSession, SessionSummaryManager
+from agno.session import AgentSession, SessionSummaryManager, TeamSession, WorkflowSession
 from agno.tools import Toolkit
 from agno.tools.function import Function
+from agno.utils.agent import (
+    await_for_background_tasks,
+    await_for_background_tasks_stream,
+    collect_joint_audios,
+    collect_joint_files,
+    collect_joint_images,
+    collect_joint_videos,
+    scrub_history_messages_from_run_output,
+    scrub_media_from_run_output,
+    scrub_tool_results_from_run_output,
+    wait_for_background_tasks,
+    wait_for_background_tasks_stream,
+)
 from agno.utils.common import is_typed_dict, validate_typed_dict
 from agno.utils.events import (
-    create_memory_update_completed_event,
-    create_memory_update_started_event,
     create_parser_model_response_completed_event,
     create_parser_model_response_started_event,
+    create_post_hook_completed_event,
+    create_post_hook_started_event,
     create_pre_hook_completed_event,
     create_pre_hook_started_event,
     create_reasoning_completed_event,
@@ -80,13 +93,17 @@ from agno.utils.events import (
     create_reasoning_step_event,
     create_run_cancelled_event,
     create_run_completed_event,
+    create_run_content_completed_event,
     create_run_continued_event,
     create_run_error_event,
     create_run_output_content_event,
     create_run_paused_event,
     create_run_started_event,
+    create_session_summary_completed_event,
+    create_session_summary_started_event,
     create_tool_call_completed_event,
     create_tool_call_started_event,
+    handle_event,
 )
 from agno.utils.hooks import filter_hook_args, normalize_hooks
 from agno.utils.knowledge import get_agentic_or_user_search_filters
@@ -332,7 +349,9 @@ class Agent:
     # Stream the response from the Agent
     stream: Optional[bool] = None
     # Stream the intermediate steps from the Agent
-
+    stream_events: Optional[bool] = None
+    # [Deprecated] Stream the intermediate steps from the Agent
+    stream_intermediate_steps: Optional[bool] = None
 
     # Persist the events on the run response
     store_events: bool = False
@@ -456,7 +475,8 @@ class Agent:
         use_json_mode: bool = False,
         save_response_to_file: Optional[str] = None,
         stream: Optional[bool] = None,
-
+        stream_events: Optional[bool] = None,
+        stream_intermediate_steps: Optional[bool] = None,
         store_events: bool = False,
         events_to_skip: Optional[List[RunEvent]] = None,
         role: Optional[str] = None,
@@ -501,11 +521,6 @@ class Agent:
         self.add_history_to_context = add_history_to_context
         self.num_history_runs = num_history_runs
 
-        if add_history_to_context and not db:
-            log_warning(
-                "add_history_to_context is True, but no database has been assigned to the agent. History will not be added to the context."
-            )
-
         self.store_media = store_media
         self.store_tool_messages = store_tool_messages
         self.store_history_messages = store_history_messages
@@ -574,7 +589,7 @@ class Agent:
         self.save_response_to_file = save_response_to_file
 
         self.stream = stream
-        self.
+        self.stream_events = stream_events or stream_intermediate_steps
 
         self.store_events = store_events
         self.role = role
@@ -607,6 +622,22 @@ class Agent:
 
         self._hooks_normalised = False
 
+        # Lazy-initialized shared thread pool executor for background tasks (memory, cultural knowledge, etc.)
+        self._background_executor: Optional[Any] = None
+
+    @property
+    def background_executor(self) -> Any:
+        """Lazy initialization of shared thread pool executor for background tasks.
+
+        Handles both memory creation and cultural knowledge updates concurrently.
+        Initialized only on first use (runtime, not instantiation) and reused across runs.
+        """
+        if self._background_executor is None:
+            from concurrent.futures import ThreadPoolExecutor
+
+            self._background_executor = ThreadPoolExecutor(max_workers=3, thread_name_prefix="agno-bg")
+        return self._background_executor
+
     def set_id(self) -> None:
         if self.id is None:
             self.id = generate_id_from_name(self.name)
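The hunk above adds a lazily created, shared `ThreadPoolExecutor` used for background memory and cultural-knowledge work. A self-contained sketch of the same lazy-property pattern; the class below is a simplified stand-in, not the real `Agent`:

```python
from concurrent.futures import Future, ThreadPoolExecutor
from typing import Callable, Optional


class BackgroundWorkMixin:
    """Simplified stand-in mirroring the lazy executor shown in the diff above."""

    def __init__(self) -> None:
        # Created on first use (at runtime, not at instantiation) and reused across runs
        self._background_executor: Optional[ThreadPoolExecutor] = None

    @property
    def background_executor(self) -> ThreadPoolExecutor:
        if self._background_executor is None:
            self._background_executor = ThreadPoolExecutor(max_workers=3, thread_name_prefix="agno-bg")
        return self._background_executor

    def submit_background(self, fn: Callable, **kwargs) -> Future:
        # e.g. memory creation or cultural-knowledge updates
        return self.background_executor.submit(fn, **kwargs)
```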
@@ -779,11 +810,9 @@ class Agent:
 
     def _initialize_session(
         self,
-        run_id: str,
         session_id: Optional[str] = None,
         user_id: Optional[str] = None,
-
-    ) -> Tuple[str, Optional[str], Dict[str, Any]]:
+    ) -> Tuple[str, Optional[str]]:
         """Initialize the session for the agent."""
 
         if session_id is None:
@@ -800,26 +829,23 @@ class Agent:
         if user_id is None or user_id == "":
             user_id = self.user_id
 
-
-        if session_state is None:
-            session_state = self.session_state or {}
-        else:
-            # If run session_state is provided, merge agent defaults under it
-            # This ensures run state takes precedence over agent defaults
-            if self.session_state:
-                base_state = self.session_state.copy()
-                merge_dictionaries(base_state, session_state)
-                session_state.clear()
-                session_state.update(base_state)
+        return session_id, user_id
 
-
+    def _initialize_session_state(
+        self,
+        session_state: Dict[str, Any],
+        user_id: Optional[str] = None,
+        session_id: Optional[str] = None,
+        run_id: Optional[str] = None,
+    ) -> Dict[str, Any]:
+        """Initialize the session state for the agent."""
+        if user_id:
             session_state["current_user_id"] = user_id
         if session_id is not None:
             session_state["current_session_id"] = session_id
         if run_id is not None:
             session_state["current_run_id"] = run_id
-
-        return session_id, user_id, session_state  # type: ignore
+        return session_state
 
     def _run(
         self,
@@ -841,16 +867,18 @@ class Agent:
 
         Steps:
         1. Execute pre-hooks
-        2.
-        3.
-        4.
-        5.
-        6.
-        7.
-        8.
-        9.
-        10.
-        11.
+        2. Determine tools for model
+        3. Prepare run messages
+        4. Start memory creation in background thread
+        5. Reason about the task if reasoning is enabled
+        6. Generate a response from the Model (includes running function calls)
+        7. Update the RunOutput with the model response
+        8. Store media if enabled
+        9. Convert the response to the structured format if needed
+        10. Execute post-hooks
+        11. Wait for background memory creation and cultural knowledge creation
+        12. Create session summary
+        13. Cleanup and store the run response and session
         """
 
         # Register run for cancellation tracking
@@ -876,6 +904,7 @@ class Agent:
             # Consume the generator without yielding
             deque(pre_hook_iterator, maxlen=0)
 
+        # 2. Determine tools for model
         self._determine_tools_for_model(
             model=self.model,
             run_response=run_response,
@@ -887,7 +916,7 @@ class Agent:
             knowledge_filters=knowledge_filters,
         )
 
-        #
+        # 3. Prepare run messages
         run_messages: RunMessages = self._get_run_messages(
             run_response=run_response,
             input=run_input.input_content,
@@ -911,114 +940,132 @@ class Agent:
 
         log_debug(f"Agent Run Start: {run_response.run_id}", center=True)
 
-        #
-
+        # Start memory creation on a separate thread (runs concurrently with the main execution loop)
+        memory_future = None
+        # 4. Start memory creation in background thread if memory manager is enabled and agentic memory is disabled
+        if run_messages.user_message is not None and self.memory_manager is not None and not self.enable_agentic_memory:
+            log_debug("Starting memory creation in background thread.")
+            memory_future = self.background_executor.submit(
+                self._make_memories, run_messages=run_messages, user_id=user_id
+            )
 
-        #
-
+        # Start cultural knowledge creation on a separate thread (runs concurrently with the main execution loop)
+        cultural_knowledge_future = None
+        if (
+            run_messages.user_message is not None
+            and self.culture_manager is not None
+            and self.update_cultural_knowledge
+        ):
+            log_debug("Starting cultural knowledge creation in background thread.")
+            cultural_knowledge_future = self.background_executor.submit(
+                self._make_cultural_knowledge, run_messages=run_messages
+            )
 
-
-
-
-
-
-            functions=self._functions_for_model,
-            tool_choice=self.tool_choice,
-            tool_call_limit=self.tool_call_limit,
-            response_format=response_format,
-            run_response=run_response,
-            send_media_to_model=self.send_media_to_model,
-        )
+        try:
+            raise_if_cancelled(run_response.run_id)  # type: ignore
+
+            # 5. Reason about the task
+            self._handle_reasoning(run_response=run_response, run_messages=run_messages)
 
-
-
+            # Check for cancellation before model call
+            raise_if_cancelled(run_response.run_id)  # type: ignore
 
-
-
+            # 6. Generate a response from the Model (includes running function calls)
+            self.model = cast(Model, self.model)
+            model_response: ModelResponse = self.model.response(
+                messages=run_messages.messages,
+                tools=self._tools_for_model,
+                functions=self._functions_for_model,
+                tool_choice=self.tool_choice,
+                tool_call_limit=self.tool_call_limit,
+                response_format=response_format,
+                run_response=run_response,
+                send_media_to_model=self.send_media_to_model,
+            )
 
-
-
+            # Check for cancellation after model call
+            raise_if_cancelled(run_response.run_id)  # type: ignore
 
-
-
-            model_response=model_response,
-            run_response=run_response,
-            run_messages=run_messages,
-        )
+            # If an output model is provided, generate output using the output model
+            self._generate_response_with_output_model(model_response, run_messages)
 
-
-        self.
+            # If a parser model is provided, structure the response separately
+            self._parse_response_with_parser_model(model_response, run_messages)
 
-
-
-
-            run_response=run_response,
-            run_messages=run_messages,
-            session=session,
-            user_id=user_id,
+            # 7. Update the RunOutput with the model response
+            self._update_run_response(
+                model_response=model_response, run_response=run_response, run_messages=run_messages
             )
 
-
-
+            # We should break out of the run function
+            if any(tool_call.is_paused for tool_call in run_response.tools or []):
+                wait_for_background_tasks(
+                    memory_future=memory_future, cultural_knowledge_future=cultural_knowledge_future
+                )
 
-
-        if self.post_hooks is not None:
-            self._execute_post_hooks(
-                hooks=self.post_hooks,  # type: ignore
-                run_output=run_response,
-                session_state=session_state,
-                dependencies=dependencies,
-                metadata=metadata,
-                session=session,
-                user_id=user_id,
-                debug_mode=debug_mode,
-                **kwargs,
-            )
-        run_response.status = RunStatus.completed
-        # Stop the timer for the Run duration
-        if run_response.metrics:
-            run_response.metrics.stop_timer()
+                return self._handle_agent_run_paused(run_response=run_response, session=session, user_id=user_id)
 
-
-
+            # 8. Store media if enabled
+            if self.store_media:
+                self._store_media(run_response, model_response)
 
-
-
-            run_response=run_response,
-            input=run_messages.user_message,
-            session_id=session.session_id,
-            user_id=user_id,
-        )
+            # 9. Convert the response to the structured format if needed
+            self._convert_response_to_structured_format(run_response)
 
-
-
+            # 10. Execute post-hooks after output is generated but before response is returned
+            if self.post_hooks is not None:
+                post_hook_iterator = self._execute_post_hooks(
+                    hooks=self.post_hooks,  # type: ignore
+                    run_output=run_response,
+                    session=session,
+                    user_id=user_id,
+                    session_state=session_state,
+                    dependencies=dependencies,
+                    metadata=metadata,
+                    debug_mode=debug_mode,
+                    **kwargs,
+                )
+                deque(post_hook_iterator, maxlen=0)
 
-
-
-            run_response=run_response,
-            run_messages=run_messages,
-            session=session,
-            user_id=user_id,
-        )
-        # Consume the response iterator to ensure the memory is updated before the run is completed
-        deque(response_iterator, maxlen=0)
+            # Check for cancellation
+            raise_if_cancelled(run_response.run_id)  # type: ignore
 
-
-
-        session.upsert_run(run=run_response)
+            # 11. Wait for background memory creation and cultural knowledge creation
+            wait_for_background_tasks(memory_future=memory_future, cultural_knowledge_future=cultural_knowledge_future)
 
-
-
+            # 12. Create session summary
+            if self.session_summary_manager is not None:
+                # Upsert the RunOutput to Agent Session before creating the session summary
+                session.upsert_run(run=run_response)
+                try:
+                    self.session_summary_manager.create_session_summary(session=session)
+                except Exception as e:
+                    log_warning(f"Error in session summary creation: {str(e)}")
+
+            run_response.status = RunStatus.completed
 
-
-
+            # 13. Cleanup and store the run response and session
+            self._cleanup_and_store(run_response=run_response, session=session, user_id=user_id)
 
-
+            # Log Agent Telemetry
+            self._log_agent_telemetry(session_id=session.session_id, run_id=run_response.run_id)
 
-
-        cleanup_run(run_response.run_id)  # type: ignore
+            log_debug(f"Agent Run End: {run_response.run_id}", center=True, symbol="*")
 
-
+            return run_response
+        except RunCancelledException as e:
+            # Handle run cancellation
+            log_info(f"Run {run_response.run_id} was cancelled")
+            run_response.content = str(e)
+            run_response.status = RunStatus.cancelled
+
+            # Cleanup and store the run response and session
+            self._cleanup_and_store(run_response=run_response, session=session, user_id=user_id)
+
+            return run_response
+        finally:
+            # Always clean up the run tracking
+            cleanup_run(run_response.run_id)  # type: ignore
 
     def _run_stream(
         self,
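The rewritten synchronous `_run` above submits memory and cultural-knowledge creation to the executor before the model call and only waits on the futures near the end of the run. A hedged sketch of that submit-then-wait shape; the helper below is illustrative and is not the actual `wait_for_background_tasks` from `agno.utils.agent`:

```python
from concurrent.futures import Future, ThreadPoolExecutor
from typing import Callable, Optional


def run_with_background_work(model_call: Callable[[], str], side_work: Callable[[], None]) -> str:
    executor = ThreadPoolExecutor(max_workers=1)
    # Kick off side work first so it overlaps with the (slow) model call
    side_future: Optional[Future] = executor.submit(side_work)

    result = model_call()

    # Wait at the end; a failure in side work is reported rather than failing the run
    if side_future is not None:
        try:
            side_future.result()
        except Exception as exc:
            print(f"Background task failed: {exc}")
    executor.shutdown(wait=False)
    return result
```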
@@ -1033,7 +1080,7 @@ class Agent:
         metadata: Optional[Dict[str, Any]] = None,
         dependencies: Optional[Dict[str, Any]] = None,
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
-
+        stream_events: bool = False,
         yield_run_response: bool = False,
         debug_mode: Optional[bool] = None,
         **kwargs: Any,
@@ -1042,15 +1089,15 @@ class Agent:
 
         Steps:
         1. Execute pre-hooks
-        2.
-        3.
-        4.
-        5.
-        6.
-        7.
-        8.
-        9. Create
-        10.
+        2. Determine tools for model
+        3. Prepare run messages
+        4. Start memory creation in background thread
+        5. Reason about the task if reasoning is enabled
+        6. Process model response
+        7. Parse response with parser model if provided
+        8. Wait for background memory creation and cultural knowledge creation
+        9. Create session summary
+        10. Cleanup and store the run response and session
         """
 
         # Register run for cancellation tracking
@@ -1076,6 +1123,7 @@ class Agent:
             for event in pre_hook_iterator:
                 yield event
 
+        # 2. Determine tools for model
         self._determine_tools_for_model(
             model=self.model,
             run_response=run_response,
@@ -1087,7 +1135,7 @@ class Agent:
             knowledge_filters=knowledge_filters,
         )
 
-        #
+        # 3. Prepare run messages
         run_messages: RunMessages = self._get_run_messages(
             run_response=run_response,
             input=run_input.input_content,
@@ -1111,25 +1159,55 @@ class Agent:
 
         log_debug(f"Agent Run Start: {run_response.run_id}", center=True)
 
+        # Start memory creation on a separate thread (runs concurrently with the main execution loop)
+        memory_future = None
+        # 4. Start memory creation in background thread if memory manager is enabled and agentic memory is disabled
+        if run_messages.user_message is not None and self.memory_manager is not None and not self.enable_agentic_memory:
+            log_debug("Starting memory creation in background thread.")
+            memory_future = self.background_executor.submit(
+                self._make_memories, run_messages=run_messages, user_id=user_id
+            )
+
+        # Start cultural knowledge creation on a separate thread (runs concurrently with the main execution loop)
+        cultural_knowledge_future = None
+        if (
+            run_messages.user_message is not None
+            and self.culture_manager is not None
+            and self.update_cultural_knowledge
+        ):
+            log_debug("Starting cultural knowledge creation in background thread.")
+            cultural_knowledge_future = self.background_executor.submit(
+                self._make_cultural_knowledge, run_messages=run_messages
+            )
+
         try:
             # Start the Run by yielding a RunStarted event
-            if
-                yield
+            if stream_events:
+                yield handle_event(  # type: ignore
+                    create_run_started_event(run_response),
+                    run_response,
+                    events_to_skip=self.events_to_skip,  # type: ignore
+                    store_events=self.store_events,
+                )
 
-            #
-            yield from self._handle_reasoning_stream(
+            # 5. Reason about the task if reasoning is enabled
+            yield from self._handle_reasoning_stream(
+                run_response=run_response,
+                run_messages=run_messages,
+                stream_events=stream_events,
+            )
 
             # Check for cancellation before model processing
             raise_if_cancelled(run_response.run_id)  # type: ignore
 
-            #
+            # 6. Process model response
             if self.output_model is None:
                 for event in self._handle_model_response_stream(
                     session=session,
                     run_response=run_response,
                     run_messages=run_messages,
                     response_format=response_format,
-
+                    stream_events=stream_events,
                 ):
                     raise_if_cancelled(run_response.run_id)  # type: ignore
                     yield event
@@ -1144,11 +1222,11 @@ class Agent:
                     run_response=run_response,
                     run_messages=run_messages,
                     response_format=response_format,
-
+                    stream_events=stream_events,
                 ):
                     raise_if_cancelled(run_response.run_id)  # type: ignore
                     if isinstance(event, RunContentEvent):
-                        if
+                        if stream_events:
                             yield IntermediateRunContentEvent(
                                 content=event.content,
                                 content_type=event.content_type,
@@ -1161,7 +1239,7 @@ class Agent:
                     session=session,
                     run_response=run_response,
                     run_messages=run_messages,
-
+                    stream_events=stream_events,
                 ):
                     raise_if_cancelled(run_response.run_id)  # type: ignore
                     yield event
@@ -1169,26 +1247,40 @@ class Agent:
             # Check for cancellation after model processing
             raise_if_cancelled(run_response.run_id)  # type: ignore
 
-            #
+            # 7. Parse response with parser model if provided
             yield from self._parse_response_with_parser_model_stream(
-                session=session,
-                run_response=run_response,
-                stream_intermediate_steps=stream_intermediate_steps,
+                session=session, run_response=run_response, stream_events=stream_events
             )
 
             # We should break out of the run function
             if any(tool_call.is_paused for tool_call in run_response.tools or []):
-                yield from
+                yield from wait_for_background_tasks_stream(
+                    memory_future=memory_future,
+                    cultural_knowledge_future=cultural_knowledge_future,
+                    stream_events=stream_events,
                     run_response=run_response,
-
-
-
+                    events_to_skip=self.events_to_skip,
+                    store_events=self.store_events,
+                )
+
+                # Handle the paused run
+                yield from self._handle_agent_run_paused_stream(
+                    run_response=run_response, session=session, user_id=user_id
                 )
                 return
 
+            # Yield RunContentCompletedEvent
+            if stream_events:
+                yield handle_event(  # type: ignore
+                    create_run_content_completed_event(from_run_response=run_response),
+                    run_response,
+                    events_to_skip=self.events_to_skip,  # type: ignore
+                    store_events=self.store_events,
+                )
+
             # Execute post-hooks after output is generated but before response is returned
             if self.post_hooks is not None:
-                self._execute_post_hooks(
+                yield from self._execute_post_hooks(
                     hooks=self.post_hooks,  # type: ignore
                     run_output=run_response,
                     session_state=session_state,
@@ -1200,48 +1292,56 @@ class Agent:
                     **kwargs,
                 )
 
-
-
-
-
-
-
-            # 5. Calculate session metrics
-            self._update_session_metrics(session=session, run_response=run_response)
-
-            # 6. Optional: Save output to file if save_response_to_file is set
-            self.save_run_response_to_file(
+            # 8. Wait for background memory creation and cultural knowledge creation
+            yield from wait_for_background_tasks_stream(
+                memory_future=memory_future,
+                cultural_knowledge_future=cultural_knowledge_future,
+                stream_events=stream_events,
                 run_response=run_response,
-                input=run_messages.user_message,
-                session_id=session.session_id,
-                user_id=user_id,
             )
 
-            #
-
+            # 9. Create session summary
+            if self.session_summary_manager is not None:
+                # Upsert the RunOutput to Agent Session before creating the session summary
+                session.upsert_run(run=run_response)
 
-
-
-
-
-
-
-
+                if stream_events:
+                    yield handle_event(  # type: ignore
+                        create_session_summary_started_event(from_run_response=run_response),
+                        run_response,
+                        events_to_skip=self.events_to_skip,  # type: ignore
+                        store_events=self.store_events,
+                    )
+                try:
+                    self.session_summary_manager.create_session_summary(session=session)
+                except Exception as e:
+                    log_warning(f"Error in session summary creation: {str(e)}")
+                if stream_events:
+                    yield handle_event(  # type: ignore
+                        create_session_summary_completed_event(
+                            from_run_response=run_response, session_summary=session.summary
+                        ),
+                        run_response,
+                        events_to_skip=self.events_to_skip,  # type: ignore
+                        store_events=self.store_events,
+                    )
 
-            #
-            completed_event =
-                create_run_completed_event(from_run_response=run_response),
+            # Create the run completed event
+            completed_event = handle_event(  # type: ignore
+                create_run_completed_event(from_run_response=run_response),
+                run_response,
+                events_to_skip=self.events_to_skip,  # type: ignore
+                store_events=self.store_events,
             )
 
-            #
-
-            session.upsert_run(run=run_response)
+            # Set the run status to completed
+            run_response.status = RunStatus.completed
 
-            #
-            self.
+            # 10. Cleanup and store the run response and session
+            self._cleanup_and_store(run_response=run_response, session=session, user_id=user_id)
 
-            if
-                yield completed_event
+            if stream_events:
+                yield completed_event  # type: ignore
 
             if yield_run_response:
                 yield run_response
@@ -1258,14 +1358,15 @@ class Agent:
             run_response.content = str(e)
 
             # Yield the cancellation event
-            yield
+            yield handle_event(  # type: ignore
                 create_run_cancelled_event(from_run_response=run_response, reason=str(e)),
                 run_response,
+                events_to_skip=self.events_to_skip,  # type: ignore
+                store_events=self.store_events,
             )
 
-            #
-
-            self.save_session(session=session)
+            # Cleanup and store the run response and session
+            self._cleanup_and_store(run_response=run_response, session=session, user_id=user_id)
         finally:
             # Always clean up the run tracking
             cleanup_run(run_response.run_id)  # type: ignore
@@ -1276,6 +1377,7 @@ class Agent:
         input: Union[str, List, Dict, Message, BaseModel, List[Message]],
         *,
         stream: Literal[False] = False,
+        stream_events: Optional[bool] = None,
         stream_intermediate_steps: Optional[bool] = None,
         user_id: Optional[str] = None,
         session_id: Optional[str] = None,
@@ -1301,6 +1403,7 @@ class Agent:
         input: Union[str, List, Dict, Message, BaseModel, List[Message]],
         *,
         stream: Literal[True] = True,
+        stream_events: Optional[bool] = None,
         stream_intermediate_steps: Optional[bool] = None,
         user_id: Optional[str] = None,
         session_id: Optional[str] = None,
@@ -1326,6 +1429,7 @@ class Agent:
         input: Union[str, List, Dict, Message, BaseModel, List[Message]],
         *,
         stream: Optional[bool] = None,
+        stream_events: Optional[bool] = None,
        stream_intermediate_steps: Optional[bool] = None,
         user_id: Optional[str] = None,
         session_id: Optional[str] = None,
@@ -1351,6 +1455,11 @@ class Agent:
                 "`run` method is not supported with an async database. Please use `arun` method instead."
             )
 
+        if (add_history_to_context or self.add_history_to_context) and not self.db and not self.team_id:
+            log_warning(
+                "add_history_to_context is True, but no database has been assigned to the agent. History will not be added to the context."
+            )
+
         # Create a run_id for this specific run
         run_id = str(uuid4())
 
@@ -1365,12 +1474,7 @@ class Agent:
             self.post_hooks = normalize_hooks(self.post_hooks)
             self._hooks_normalised = True
 
-        session_id, user_id
-            run_id=run_id,
-            session_id=session_id,
-            user_id=user_id,
-            session_state=session_state,
-        )
+        session_id, user_id = self._initialize_session(session_id=session_id, user_id=user_id)
 
         # Initialize the Agent
         self.initialize_agent(debug_mode=debug_mode)
@@ -1392,15 +1496,19 @@ class Agent:
         agent_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
         self._update_metadata(session=agent_session)
 
+        # Initialize session state
+        session_state = self._initialize_session_state(
+            session_state=session_state or {}, user_id=user_id, session_id=session_id, run_id=run_id
+        )
         # Update session state from DB
         session_state = self._load_session_state(session=agent_session, session_state=session_state)
-
         # Determine runtime dependencies
         run_dependencies = dependencies if dependencies is not None else self.dependencies
 
         # Resolve dependencies
         if run_dependencies is not None:
             self._resolve_run_dependencies(dependencies=run_dependencies)
+
         add_dependencies = (
             add_dependencies_to_context if add_dependencies_to_context is not None else self.add_dependencies_to_context
         )
@@ -1422,17 +1530,18 @@ class Agent:
         if stream is None:
             stream = False if self.stream is None else self.stream
 
-
-
-                False if self.stream_intermediate_steps is None else self.stream_intermediate_steps
-            )
+        # Considering both stream_events and stream_intermediate_steps (deprecated)
+        stream_events = stream_events or stream_intermediate_steps
 
-        # Can't
+        # Can't stream events if streaming is disabled
         if stream is False:
-
+            stream_events = False
+
+        if stream_events is None:
+            stream_events = False if self.stream_events is None else self.stream_events
 
         self.stream = self.stream or stream
-        self.
+        self.stream_events = self.stream_events or stream_events
 
         # Prepare arguments for the model
         response_format = self._get_response_format() if self.parser_model is None else None
@@ -1484,7 +1593,7 @@ class Agent:
             metadata=metadata,
             dependencies=run_dependencies,
             response_format=response_format,
-
+            stream_events=stream_events,
            yield_run_response=yield_run_response,
             debug_mode=debug_mode,
             **kwargs,
@@ -1523,17 +1632,6 @@ class Agent:
                     import time
 
                     time.sleep(delay)
-        except RunCancelledException as e:
-            # Handle run cancellation
-            log_info(f"Run {run_response.run_id} was cancelled")
-            run_response.content = str(e)
-            run_response.status = RunStatus.cancelled
-
-            # Add the RunOutput to Agent Session even when cancelled
-            agent_session.upsert_run(run=run_response)
-            self.save_session(session=agent_session)
-
-            return run_response
         except KeyboardInterrupt:
             run_response.content = "Operation cancelled by user"
             run_response.status = RunStatus.cancelled
@@ -1578,7 +1676,7 @@ class Agent:
         debug_mode: Optional[bool] = None,
         **kwargs: Any,
     ) -> RunOutput:
-        """Run the Agent and
+        """Run the Agent and return the RunOutput.
 
         Steps:
         1. Read or create session
@@ -1587,14 +1685,16 @@ class Agent:
         4. Execute pre-hooks
         5. Determine tools for model
         6. Prepare run messages
-        7.
-        8.
-        9.
-        10.
-        11.
-        12.
-        13.
-        14.
+        7. Start memory creation in background task
+        8. Reason about the task if reasoning is enabled
+        9. Generate a response from the Model (includes running function calls)
+        10. Update the RunOutput with the model response
+        11. Convert response to structured format
+        12. Store media if enabled
+        13. Execute post-hooks
+        14. Wait for background memory creation
+        15. Create session summary
+        16. Cleanup and store (scrub, stop timer, save to file, add to session, calculate metrics, save session)
         """
         log_debug(f"Agent Run Start: {run_response.run_id}", center=True)
 
@@ -1602,13 +1702,15 @@ class Agent:
         register_run(run_response.run_id)  # type: ignore
 
         # 1. Read or create session. Reads from the database if provided.
-
-            agent_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)
-        else:
-            agent_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
+        agent_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)
 
         # 2. Update metadata and session state
         self._update_metadata(session=agent_session)
+        # Initialize session state
+        session_state = self._initialize_session_state(
+            session_state=session_state or {}, user_id=user_id, session_id=session_id, run_id=run_response.run_id
+        )
+        # Update session state from DB
         if session_state is not None:
             session_state = self._load_session_state(session=agent_session, session_state=session_state)
 
@@ -1672,12 +1774,37 @@ class Agent:
         if len(run_messages.messages) == 0:
             log_error("No messages to be sent to the model.")
 
+        # 7. Start memory creation as a background task (runs concurrently with the main execution)
+        memory_task = None
+        if run_messages.user_message is not None and self.memory_manager is not None and not self.enable_agentic_memory:
+            import asyncio
+
+            log_debug("Starting memory creation in background task.")
+            memory_task = asyncio.create_task(self._amake_memories(run_messages=run_messages, user_id=user_id))
+
+        # Start cultural knowledge creation on a separate thread (runs concurrently with the main execution loop)
+        cultural_knowledge_task = None
+        if (
+            run_messages.user_message is not None
+            and self.culture_manager is not None
+            and self.update_cultural_knowledge
+        ):
+            import asyncio
+
+            log_debug("Starting cultural knowledge creation in background thread.")
+            cultural_knowledge_task = asyncio.create_task(self._acreate_cultural_knowledge(run_messages=run_messages))
+
         try:
-            #
+            # Check for cancellation before model call
+            raise_if_cancelled(run_response.run_id)  # type: ignore
+
+            # 8. Reason about the task if reasoning is enabled
             await self._ahandle_reasoning(run_response=run_response, run_messages=run_messages)
+
+            # Check for cancellation before model call
             raise_if_cancelled(run_response.run_id)  # type: ignore
 
-            #
+            # 9. Generate a response from the Model (includes running function calls)
             model_response: ModelResponse = await self.model.aresponse(
                 messages=run_messages.messages,
                 tools=self._tools_for_model,
@@ -1687,43 +1814,42 @@ class Agent:
                 response_format=response_format,
                 send_media_to_model=self.send_media_to_model,
             )
+
+            # Check for cancellation after model call
             raise_if_cancelled(run_response.run_id)  # type: ignore
 
             # If an output model is provided, generate output using the output model
             await self._agenerate_response_with_output_model(model_response=model_response, run_messages=run_messages)
+
             # If a parser model is provided, structure the response separately
             await self._aparse_response_with_parser_model(model_response=model_response, run_messages=run_messages)
 
-            #
+            # 10. Update the RunOutput with the model response
             self._update_run_response(
                 model_response=model_response,
                 run_response=run_response,
                 run_messages=run_messages,
             )
 
-            #
-            if self.store_media:
-                self._store_media(run_response, model_response)
-
-            # Break out of the run function if a tool call is paused
+            # We should break out of the run function
             if any(tool_call.is_paused for tool_call in run_response.tools or []):
-
-
-
-
-                    user_id=user_id
+                await await_for_background_tasks(
+                    memory_task=memory_task, cultural_knowledge_task=cultural_knowledge_task
+                )
+                return await self._ahandle_agent_run_paused(
+                    run_response=run_response, session=agent_session, user_id=user_id
                 )
-            raise_if_cancelled(run_response.run_id)  # type: ignore
-
-            # 10. Calculate session metrics
-            self._update_session_metrics(session=agent_session, run_response=run_response)
 
-            # Convert the response to the structured format if needed
+            # 11. Convert the response to the structured format if needed
             self._convert_response_to_structured_format(run_response)
 
-            #
+            # 12. Store media if enabled
+            if self.store_media:
+                self._store_media(run_response, model_response)
+
+            # 13. Execute post-hooks (after output is generated but before response is returned)
             if self.post_hooks is not None:
-
+                async for _ in self._aexecute_post_hooks(
                     hooks=self.post_hooks,  # type: ignore
                     run_output=run_response,
                     session_state=session_state,
@@ -1733,44 +1859,28 @@ class Agent:
                     user_id=user_id,
                     debug_mode=debug_mode,
                     **kwargs,
-                )
+                ):
+                    pass
 
-            #
-            run_response.
+            # Check for cancellation
+            raise_if_cancelled(run_response.run_id)  # type: ignore
 
-            #
-
-            run_response.metrics.stop_timer()
+            # 14. Wait for background memory creation
+            await await_for_background_tasks(memory_task=memory_task, cultural_knowledge_task=cultural_knowledge_task)
 
-            #
-            self.
-
-
-
-
-
+            # 15. Create session summary
+            if self.session_summary_manager is not None:
+                # Upsert the RunOutput to Agent Session before creating the session summary
+                agent_session.upsert_run(run=run_response)
+                try:
+                    await self.session_summary_manager.acreate_session_summary(session=agent_session)
+                except Exception as e:
+                    log_warning(f"Error in session summary creation: {str(e)}")
 
-
-            agent_session.upsert_run(run=run_response)
+            run_response.status = RunStatus.completed
 
-            #
-
-                run_response=run_response,
-                run_messages=run_messages,
-                session=agent_session,
-                user_id=user_id,
-            ):
-                pass
-
-            # 13. Scrub the stored run based on storage flags
-            if self._scrub_run_output_for_storage(run_response):
-                agent_session.upsert_run(run=run_response)
-
-            # 14. Save session to storage
-            if self._has_async_db():
-                await self.asave_session(session=agent_session)
-            else:
-                self.save_session(session=agent_session)
+            # 16. Cleanup and store the run response and session
+            await self._acleanup_and_store(run_response=run_response, session=agent_session, user_id=user_id)
 
             # Log Agent Telemetry
             await self._alog_agent_telemetry(session_id=agent_session.session_id, run_id=run_response.run_id)
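The async `arun` path above starts the same background work with `asyncio.create_task` and, in its `finally` block (next hunk), cancels any task that is still pending. A self-contained sketch of that pattern; the coroutine names are illustrative:

```python
import asyncio


async def arun_with_background_task(model_call, make_memories):
    # Side work overlaps with the awaited model call
    memory_task = asyncio.create_task(make_memories())
    try:
        result = await model_call()
        await memory_task  # let the side work finish before returning
        return result
    finally:
        # Mirror the diff: never leak a pending task if the run fails or is cancelled
        if not memory_task.done():
            memory_task.cancel()
            try:
                await memory_task
            except asyncio.CancelledError:
                pass
```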
@@ -1785,16 +1895,30 @@ class Agent:
             run_response.content = str(e)
             run_response.status = RunStatus.cancelled
 
-            #
-
-            if self._has_async_db():
-                await self.asave_session(session=agent_session)
-            else:
-                self.save_session(session=agent_session)
+            # Cleanup and store the run response and session
+            await self._acleanup_and_store(run_response=run_response, session=agent_session, user_id=user_id)
 
             return run_response
 
         finally:
+            # Cancel the memory task if it's still running
+            if memory_task is not None and not memory_task.done():
+                import asyncio
+
+                memory_task.cancel()
+                try:
+                    await memory_task
+                except asyncio.CancelledError:
+                    pass
+            # Cancel the cultural knowledge task if it's still running
+            if cultural_knowledge_task is not None and not cultural_knowledge_task.done():
+                import asyncio
+
+                cultural_knowledge_task.cancel()
+                try:
+                    await cultural_knowledge_task
+                except asyncio.CancelledError:
+                    pass
             # Always clean up the run tracking
             cleanup_run(run_response.run_id)  # type: ignore
 
@@ -1811,7 +1935,7 @@ class Agent:
         add_session_state_to_context: Optional[bool] = None,
         metadata: Optional[Dict[str, Any]] = None,
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
-
+        stream_events: bool = False,
         yield_run_response: Optional[bool] = None,
         debug_mode: Optional[bool] = None,
         **kwargs: Any,
@@ -1825,28 +1949,35 @@ class Agent:
         4. Execute pre-hooks
         5. Determine tools for model
         6. Prepare run messages
-        7.
-        8.
-        9.
-        10.
-        11.
-        12. Create
-        13.
+        7. Start memory creation in background task
+        8. Reason about the task if reasoning is enabled
+        9. Generate a response from the Model (includes running function calls)
+        10. Parse response with parser model if provided
+        11. Wait for background memory creation
+        12. Create session summary
+        13. Cleanup and store (scrub, stop timer, save to file, add to session, calculate metrics, save session)
         """
         log_debug(f"Agent Run Start: {run_response.run_id}", center=True)
 
         # Start the Run by yielding a RunStarted event
-        if
-            yield
+        if stream_events:
+            yield handle_event(  # type: ignore
+                create_run_started_event(run_response),
+                run_response,
+                events_to_skip=self.events_to_skip,  # type: ignore
+                store_events=self.store_events,
+            )
 
         # 1. Read or create session. Reads from the database if provided.
-
-            agent_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)
-        else:
-            agent_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
+        agent_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)
 
         # 2. Update metadata and session state
         self._update_metadata(session=agent_session)
+        # Initialize session state
+        session_state = self._initialize_session_state(
+            session_state=session_state or {}, user_id=user_id, session_id=session_id, run_id=run_response.run_id
+        )
+        # Update session state from DB
         if session_state is not None:
             session_state = self._load_session_state(session=agent_session, session_state=session_state)
 
@@ -1906,24 +2037,49 @@ class Agent:
         if len(run_messages.messages) == 0:
             log_error("No messages to be sent to the model.")
 
+        # 7. Start memory creation as a background task (runs concurrently with the main execution)
+        memory_task = None
+        if run_messages.user_message is not None and self.memory_manager is not None and not self.enable_agentic_memory:
+            import asyncio
+
+            log_debug("Starting memory creation in background task.")
+            memory_task = asyncio.create_task(self._amake_memories(run_messages=run_messages, user_id=user_id))
+
+        # Start cultural knowledge creation on a separate thread (runs concurrently with the main execution loop)
+        cultural_knowledge_task = None
+        if (
+            run_messages.user_message is not None
+            and self.culture_manager is not None
+            and self.update_cultural_knowledge
+        ):
+            import asyncio
+
+            log_debug("Starting cultural knowledge creation in background task.")
+            cultural_knowledge_task = asyncio.create_task(self._acreate_cultural_knowledge(run_messages=run_messages))
+
         # Register run for cancellation tracking
         register_run(run_response.run_id)  # type: ignore
 
         try:
-            #
-            async for item in self._ahandle_reasoning_stream(
+            # 8. Reason about the task if reasoning is enabled
+            async for item in self._ahandle_reasoning_stream(
+                run_response=run_response,
+                run_messages=run_messages,
+                stream_events=stream_events,
+            ):
                 raise_if_cancelled(run_response.run_id)  # type: ignore
                 yield item
+
             raise_if_cancelled(run_response.run_id)  # type: ignore
 
-            #
+            # 9. Generate a response from the Model
             if self.output_model is None:
                 async for event in self._ahandle_model_response_stream(
                     session=agent_session,
                     run_response=run_response,
                     run_messages=run_messages,
                     response_format=response_format,
-
+                    stream_events=stream_events,
                 ):
                     raise_if_cancelled(run_response.run_id)  # type: ignore
                     yield event
@@ -1938,11 +2094,11 @@ class Agent:
                     run_response=run_response,
                     run_messages=run_messages,
                     response_format=response_format,
-
+                    stream_events=stream_events,
                 ):
                     raise_if_cancelled(run_response.run_id)  # type: ignore
                     if isinstance(event, RunContentEvent):
-                        if
+                        if stream_events:
                             yield IntermediateRunContentEvent(
                                 content=event.content,
                                 content_type=event.content_type,
@@ -1955,7 +2111,7 @@ class Agent:
                     session=agent_session,
                     run_response=run_response,
                     run_messages=run_messages,
-
+                    stream_events=stream_events,
                 ):
                     raise_if_cancelled(run_response.run_id)  # type: ignore
                     yield event
@@ -1963,28 +2119,39 @@ class Agent:
             # Check for cancellation after model processing
             raise_if_cancelled(run_response.run_id)  # type: ignore
 
-            #
+            # 10. Parse response with parser model if provided
             async for event in self._aparse_response_with_parser_model_stream(
-                session=agent_session,
-                run_response=run_response,
-                stream_intermediate_steps=stream_intermediate_steps,
+                session=agent_session, run_response=run_response, stream_events=stream_events
             ):
                 yield event
 
+            if stream_events:
+                yield handle_event(  # type: ignore
+                    create_run_content_completed_event(from_run_response=run_response),
+                    run_response,
+                    events_to_skip=self.events_to_skip,  # type: ignore
+                    store_events=self.store_events,
+                )
+
             # Break out of the run function if a tool call is paused
             if any(tool_call.is_paused for tool_call in run_response.tools or []):
-                for item in
+                async for item in await_for_background_tasks_stream(
+                    memory_task=memory_task,
+                    cultural_knowledge_task=cultural_knowledge_task,
+                    stream_events=stream_events,
                     run_response=run_response,
-
-
-
+                ):
+                    yield item
+
+                async for item in self._ahandle_agent_run_paused_stream(
+                    run_response=run_response, session=agent_session, user_id=user_id
                 ):
                     yield item
                 return
 
             # Execute post-hooks (after output is generated but before response is returned)
             if self.post_hooks is not None:
-
+                async for event in self._aexecute_post_hooks(
                     hooks=self.post_hooks,  # type: ignore
                     run_output=run_response,
                     session_state=session_state,
@@ -1994,55 +2161,62 @@ class Agent:
                     user_id=user_id,
                     debug_mode=debug_mode,
                     **kwargs,
-                )
-
-            # Set the run status to completed
-            run_response.status = RunStatus.completed
-
-            # Set the run duration
-            if run_response.metrics:
-                run_response.metrics.stop_timer()
-
-            # 9. Calculate session metrics
-            self._update_session_metrics(session=agent_session, run_response=run_response)
+                ):
+                    yield event
 
-            #
-
+            # 11. Wait for background memory creation
+            async for item in await_for_background_tasks_stream(
+                memory_task=memory_task,
+                cultural_knowledge_task=cultural_knowledge_task,
+                stream_events=stream_events,
                 run_response=run_response,
-
-
-
-
+                events_to_skip=self.events_to_skip,
+                store_events=self.store_events,
+            ):
+                yield item
 
-            #
-
+            # 12. Create session summary
+            if self.session_summary_manager is not None:
+                # Upsert the RunOutput to Agent Session before creating the session summary
+                agent_session.upsert_run(run=run_response)
 
-
-
-
-
-
-
-
-
+                if stream_events:
+                    yield handle_event(  # type: ignore
+                        create_session_summary_started_event(from_run_response=run_response),
+                        run_response,
+                        events_to_skip=self.events_to_skip,  # type: ignore
+                        store_events=self.store_events,
+                    )
+                try:
+                    await self.session_summary_manager.acreate_session_summary(session=agent_session)
+                except Exception as e:
+                    log_warning(f"Error in session summary creation: {str(e)}")
+                if stream_events:
+                    yield handle_event(  # type: ignore
+                        create_session_summary_completed_event(
+                            from_run_response=run_response, session_summary=agent_session.summary
+                        ),
+                        run_response,
+                        events_to_skip=self.events_to_skip,  # type: ignore
+                        store_events=self.store_events,
+                    )
 
-            #
-            completed_event =
-                create_run_completed_event(from_run_response=run_response),
+            # Create the run completed event
+            completed_event = handle_event(
+                create_run_completed_event(from_run_response=run_response),
+                run_response,
+                events_to_skip=self.events_to_skip,  # type: ignore
+                store_events=self.store_events,
             )
 
-            #
-
-            agent_session.upsert_run(run=run_response)
+            # Set the run status to completed
+            run_response.status = RunStatus.completed
 
-            #
-
-                await self.asave_session(session=agent_session)
-            else:
-                self.save_session(session=agent_session)
+            # 13. Cleanup and store the run response and session
+            await self._acleanup_and_store(run_response=run_response, session=agent_session, user_id=user_id)
 
-            if
-                yield completed_event
+            if stream_events:
+                yield completed_event  # type: ignore
 
             if yield_run_response:
                 yield run_response
@@ -2059,18 +2233,31 @@ class Agent:
|
|
|
2059
2233
|
run_response.content = str(e)
|
|
2060
2234
|
|
|
2061
2235
|
# Yield the cancellation event
|
|
2062
|
-
yield
|
|
2236
|
+
yield handle_event( # type: ignore
|
|
2063
2237
|
create_run_cancelled_event(from_run_response=run_response, reason=str(e)),
|
|
2064
2238
|
run_response,
|
|
2239
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
2240
|
+
store_events=self.store_events,
|
|
2065
2241
|
)
|
|
2066
2242
|
|
|
2067
|
-
#
|
|
2068
|
-
|
|
2069
|
-
if self._has_async_db():
|
|
2070
|
-
await self.asave_session(session=agent_session)
|
|
2071
|
-
else:
|
|
2072
|
-
self.save_session(session=agent_session)
|
|
2243
|
+
# Cleanup and store the run response and session
|
|
2244
|
+
await self._acleanup_and_store(run_response=run_response, session=agent_session, user_id=user_id)
|
|
2073
2245
|
finally:
|
|
2246
|
+
# Cancel the memory task if it's still running
|
|
2247
|
+
if memory_task is not None and not memory_task.done():
|
|
2248
|
+
memory_task.cancel()
|
|
2249
|
+
try:
|
|
2250
|
+
await memory_task
|
|
2251
|
+
except asyncio.CancelledError:
|
|
2252
|
+
pass
|
|
2253
|
+
|
|
2254
|
+
if cultural_knowledge_task is not None and not cultural_knowledge_task.done():
|
|
2255
|
+
cultural_knowledge_task.cancel()
|
|
2256
|
+
try:
|
|
2257
|
+
await cultural_knowledge_task
|
|
2258
|
+
except asyncio.CancelledError:
|
|
2259
|
+
pass
|
|
2260
|
+
|
|
2074
2261
|
# Always clean up the run tracking
|
|
2075
2262
|
cleanup_run(run_response.run_id) # type: ignore
|
|
2076
2263
|
|
|
@@ -2087,6 +2274,7 @@ class Agent:
|
|
|
2087
2274
|
images: Optional[Sequence[Image]] = None,
|
|
2088
2275
|
videos: Optional[Sequence[Video]] = None,
|
|
2089
2276
|
files: Optional[Sequence[File]] = None,
|
|
2277
|
+
stream_events: Optional[bool] = None,
|
|
2090
2278
|
stream_intermediate_steps: Optional[bool] = None,
|
|
2091
2279
|
retries: Optional[int] = None,
|
|
2092
2280
|
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
@@ -2111,6 +2299,7 @@ class Agent:
|
|
|
2111
2299
|
images: Optional[Sequence[Image]] = None,
|
|
2112
2300
|
videos: Optional[Sequence[Video]] = None,
|
|
2113
2301
|
files: Optional[Sequence[File]] = None,
|
|
2302
|
+
stream_events: Optional[bool] = None,
|
|
2114
2303
|
stream_intermediate_steps: Optional[bool] = None,
|
|
2115
2304
|
retries: Optional[int] = None,
|
|
2116
2305
|
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
@@ -2136,6 +2325,7 @@ class Agent:
|
|
|
2136
2325
|
images: Optional[Sequence[Image]] = None,
|
|
2137
2326
|
videos: Optional[Sequence[Video]] = None,
|
|
2138
2327
|
files: Optional[Sequence[File]] = None,
|
|
2328
|
+
stream_events: Optional[bool] = None,
|
|
2139
2329
|
stream_intermediate_steps: Optional[bool] = None,
|
|
2140
2330
|
retries: Optional[int] = None,
|
|
2141
2331
|
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
@@ -2150,6 +2340,11 @@ class Agent:
|
|
|
2150
2340
|
) -> Union[RunOutput, AsyncIterator[RunOutputEvent]]:
|
|
2151
2341
|
"""Async Run the Agent and return the response."""
|
|
2152
2342
|
|
|
2343
|
+
if (add_history_to_context or self.add_history_to_context) and not self.db and not self.team_id:
|
|
2344
|
+
log_warning(
|
|
2345
|
+
"add_history_to_context is True, but no database has been assigned to the agent. History will not be added to the context."
|
|
2346
|
+
)
|
|
2347
|
+
|
|
2153
2348
|
# Create a run_id for this specific run
|
|
2154
2349
|
run_id = str(uuid4())
|
|
2155
2350
|
|
|
@@ -2165,12 +2360,7 @@ class Agent:
|
|
|
2165
2360
|
self._hooks_normalised = True
|
|
2166
2361
|
|
|
2167
2362
|
# Initialize session
|
|
2168
|
-
session_id, user_id
|
|
2169
|
-
run_id=run_id,
|
|
2170
|
-
session_id=session_id,
|
|
2171
|
-
user_id=user_id,
|
|
2172
|
-
session_state=session_state,
|
|
2173
|
-
)
|
|
2363
|
+
session_id, user_id = self._initialize_session(session_id=session_id, user_id=user_id)
|
|
2174
2364
|
|
|
2175
2365
|
# Initialize the Agent
|
|
2176
2366
|
self.initialize_agent(debug_mode=debug_mode)
|
|
@@ -2204,17 +2394,18 @@ class Agent:
|
|
|
2204
2394
|
if stream is None:
|
|
2205
2395
|
stream = False if self.stream is None else self.stream
|
|
2206
2396
|
|
|
2207
|
-
|
|
2208
|
-
|
|
2209
|
-
False if self.stream_intermediate_steps is None else self.stream_intermediate_steps
|
|
2210
|
-
)
|
|
2397
|
+
# Considering both stream_events and stream_intermediate_steps (deprecated)
|
|
2398
|
+
stream_events = stream_events or stream_intermediate_steps
|
|
2211
2399
|
|
|
2212
|
-
# Can't
|
|
2400
|
+
# Can't stream events if streaming is disabled
|
|
2213
2401
|
if stream is False:
|
|
2214
|
-
|
|
2402
|
+
stream_events = False
|
|
2403
|
+
|
|
2404
|
+
if stream_events is None:
|
|
2405
|
+
stream_events = False if self.stream_events is None else self.stream_events
|
|
2215
2406
|
|
|
2216
2407
|
self.stream = self.stream or stream
|
|
2217
|
-
self.
|
|
2408
|
+
self.stream_events = self.stream_events or stream_events
|
|
2218
2409
|
|
|
2219
2410
|
# Prepare arguments for the model
|
|
2220
2411
|
response_format = self._get_response_format() if self.parser_model is None else None
|
|
@@ -2264,7 +2455,7 @@ class Agent:
|
|
|
2264
2455
|
run_response=run_response,
|
|
2265
2456
|
user_id=user_id,
|
|
2266
2457
|
response_format=response_format,
|
|
2267
|
-
|
|
2458
|
+
stream_events=stream_events,
|
|
2268
2459
|
yield_run_response=yield_run_response,
|
|
2269
2460
|
dependencies=run_dependencies,
|
|
2270
2461
|
session_id=session_id,
|
|
@@ -2346,6 +2537,7 @@ class Agent:
|
|
|
2346
2537
|
run_id: Optional[str] = None,
|
|
2347
2538
|
updated_tools: Optional[List[ToolExecution]] = None,
|
|
2348
2539
|
stream: Literal[False] = False,
|
|
2540
|
+
stream_events: Optional[bool] = None,
|
|
2349
2541
|
stream_intermediate_steps: Optional[bool] = None,
|
|
2350
2542
|
user_id: Optional[str] = None,
|
|
2351
2543
|
session_id: Optional[str] = None,
|
|
@@ -2364,6 +2556,7 @@ class Agent:
|
|
|
2364
2556
|
run_id: Optional[str] = None,
|
|
2365
2557
|
updated_tools: Optional[List[ToolExecution]] = None,
|
|
2366
2558
|
stream: Literal[True] = True,
|
|
2559
|
+
stream_events: Optional[bool] = False,
|
|
2367
2560
|
stream_intermediate_steps: Optional[bool] = None,
|
|
2368
2561
|
user_id: Optional[str] = None,
|
|
2369
2562
|
session_id: Optional[str] = None,
|
|
@@ -2381,6 +2574,7 @@ class Agent:
|
|
|
2381
2574
|
run_id: Optional[str] = None,
|
|
2382
2575
|
updated_tools: Optional[List[ToolExecution]] = None,
|
|
2383
2576
|
stream: Optional[bool] = None,
|
|
2577
|
+
stream_events: Optional[bool] = False,
|
|
2384
2578
|
stream_intermediate_steps: Optional[bool] = None,
|
|
2385
2579
|
user_id: Optional[str] = None,
|
|
2386
2580
|
session_id: Optional[str] = None,
|
|
@@ -2398,13 +2592,15 @@ class Agent:
|
|
|
2398
2592
|
run_id: The run id to continue. Alternative to passing run_response.
|
|
2399
2593
|
updated_tools: The updated tools to use for the run. Required to be used with `run_id`.
|
|
2400
2594
|
stream: Whether to stream the response.
|
|
2401
|
-
|
|
2595
|
+
stream_events: Whether to stream all events.
|
|
2402
2596
|
user_id: The user id to continue the run for.
|
|
2403
2597
|
session_id: The session id to continue the run for.
|
|
2404
2598
|
retries: The number of retries to continue the run for.
|
|
2405
2599
|
knowledge_filters: The knowledge filters to use for the run.
|
|
2406
2600
|
dependencies: The dependencies to use for the run.
|
|
2601
|
+
metadata: The metadata to use for the run.
|
|
2407
2602
|
debug_mode: Whether to enable debug mode.
|
|
2603
|
+
(deprecated) stream_intermediate_steps: Whether to stream all steps.
|
|
2408
2604
|
"""
|
|
2409
2605
|
if run_response is None and run_id is None:
|
|
2410
2606
|
raise ValueError("Either run_response or run_id must be provided.")
|
|
@@ -2417,10 +2613,9 @@ class Agent:
|
|
|
2417
2613
|
|
|
2418
2614
|
session_id = run_response.session_id if run_response else session_id
|
|
2419
2615
|
|
|
2420
|
-
session_id, user_id
|
|
2421
|
-
run_id=run_id, # type: ignore
|
|
2616
|
+
session_id, user_id = self._initialize_session(
|
|
2422
2617
|
session_id=session_id,
|
|
2423
|
-
user_id=user_id,
|
|
2618
|
+
user_id=user_id,
|
|
2424
2619
|
)
|
|
2425
2620
|
# Initialize the Agent
|
|
2426
2621
|
self.initialize_agent(debug_mode=debug_mode)
|
|
@@ -2429,6 +2624,10 @@ class Agent:
|
|
|
2429
2624
|
agent_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
|
|
2430
2625
|
self._update_metadata(session=agent_session)
|
|
2431
2626
|
|
|
2627
|
+
# Initialize session state
|
|
2628
|
+
session_state = self._initialize_session_state(
|
|
2629
|
+
session_state={}, user_id=user_id, session_id=session_id, run_id=run_id
|
|
2630
|
+
)
|
|
2432
2631
|
# Update session state from DB
|
|
2433
2632
|
session_state = self._load_session_state(session=agent_session, session_state=session_state)
|
|
2434
2633
|
|
|
@@ -2458,17 +2657,22 @@ class Agent:
|
|
|
2458
2657
|
if stream is None:
|
|
2459
2658
|
stream = False if self.stream is None else self.stream
|
|
2460
2659
|
|
|
2461
|
-
|
|
2462
|
-
|
|
2463
|
-
False if self.stream_intermediate_steps is None else self.stream_intermediate_steps
|
|
2464
|
-
)
|
|
2660
|
+
# Considering both stream_events and stream_intermediate_steps (deprecated)
|
|
2661
|
+
stream_events = stream_events or stream_intermediate_steps
|
|
2465
2662
|
|
|
2466
|
-
# Can't
|
|
2663
|
+
# Can't stream events if streaming is disabled
|
|
2467
2664
|
if stream is False:
|
|
2468
|
-
|
|
2665
|
+
stream_events = False
|
|
2666
|
+
|
|
2667
|
+
if stream_events is None:
|
|
2668
|
+
stream_events = False if self.stream_events is None else self.stream_events
|
|
2669
|
+
|
|
2670
|
+
# Can't stream events if streaming is disabled
|
|
2671
|
+
if stream is False:
|
|
2672
|
+
stream_events = False
|
|
2469
2673
|
|
|
2470
2674
|
self.stream = self.stream or stream
|
|
2471
|
-
self.
|
|
2675
|
+
self.stream_events = self.stream_events or stream_events
|
|
2472
2676
|
|
|
2473
2677
|
# Run can be continued from previous run response or from passed run_response context
|
|
2474
2678
|
if run_response is not None:
|
|
@@ -2530,7 +2734,7 @@ class Agent:
|
|
|
2530
2734
|
dependencies=run_dependencies,
|
|
2531
2735
|
metadata=metadata,
|
|
2532
2736
|
response_format=response_format,
|
|
2533
|
-
|
|
2737
|
+
stream_events=stream_events,
|
|
2534
2738
|
debug_mode=debug_mode,
|
|
2535
2739
|
**kwargs,
|
|
2536
2740
|
)
|
|
@@ -2604,95 +2808,105 @@ class Agent:
|
|
|
2604
2808
|
Steps:
|
|
2605
2809
|
1. Handle any updated tools
|
|
2606
2810
|
2. Generate a response from the Model
|
|
2607
|
-
3. Update
|
|
2608
|
-
4.
|
|
2609
|
-
5.
|
|
2610
|
-
6.
|
|
2611
|
-
7.
|
|
2811
|
+
3. Update the RunOutput with the model response
|
|
2812
|
+
4. Convert response to structured format
|
|
2813
|
+
5. Store media if enabled
|
|
2814
|
+
6. Execute post-hooks
|
|
2815
|
+
7. Create session summary
|
|
2816
|
+
8. Cleanup and store (scrub, stop timer, save to file, add to session, calculate metrics, save session)
|
|
2612
2817
|
"""
|
|
2818
|
+
# Register run for cancellation tracking
|
|
2819
|
+
register_run(run_response.run_id) # type: ignore
|
|
2820
|
+
|
|
2613
2821
|
self.model = cast(Model, self.model)
|
|
2614
2822
|
|
|
2615
2823
|
# 1. Handle the updated tools
|
|
2616
2824
|
self._handle_tool_call_updates(run_response=run_response, run_messages=run_messages)
|
|
2617
2825
|
|
|
2618
|
-
|
|
2619
|
-
|
|
2620
|
-
|
|
2621
|
-
messages=run_messages.messages,
|
|
2622
|
-
response_format=response_format,
|
|
2623
|
-
tools=self._tools_for_model,
|
|
2624
|
-
functions=self._functions_for_model,
|
|
2625
|
-
tool_choice=self.tool_choice,
|
|
2626
|
-
tool_call_limit=self.tool_call_limit,
|
|
2627
|
-
)
|
|
2826
|
+
try:
|
|
2827
|
+
# Check for cancellation before model call
|
|
2828
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
2628
2829
|
|
|
2629
|
-
|
|
2630
|
-
|
|
2631
|
-
|
|
2632
|
-
|
|
2633
|
-
|
|
2830
|
+
# 2. Generate a response from the Model (includes running function calls)
|
|
2831
|
+
self.model = cast(Model, self.model)
|
|
2832
|
+
model_response: ModelResponse = self.model.response(
|
|
2833
|
+
messages=run_messages.messages,
|
|
2834
|
+
response_format=response_format,
|
|
2835
|
+
tools=self._tools_for_model,
|
|
2836
|
+
functions=self._functions_for_model,
|
|
2837
|
+
tool_choice=self.tool_choice,
|
|
2838
|
+
tool_call_limit=self.tool_call_limit,
|
|
2839
|
+
)
|
|
2634
2840
|
|
|
2635
|
-
|
|
2636
|
-
|
|
2637
|
-
|
|
2638
|
-
|
|
2639
|
-
|
|
2640
|
-
|
|
2641
|
-
user_id=user_id,
|
|
2841
|
+
# Check for cancellation after model processing
|
|
2842
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
2843
|
+
|
|
2844
|
+
# 3. Update the RunOutput with the model response
|
|
2845
|
+
self._update_run_response(
|
|
2846
|
+
model_response=model_response, run_response=run_response, run_messages=run_messages
|
|
2642
2847
|
)
|
|
2643
2848
|
|
|
2644
|
-
|
|
2645
|
-
|
|
2849
|
+
# We should break out of the run function
|
|
2850
|
+
if any(tool_call.is_paused for tool_call in run_response.tools or []):
|
|
2851
|
+
return self._handle_agent_run_paused(run_response=run_response, session=session, user_id=user_id)
|
|
2646
2852
|
|
|
2647
|
-
|
|
2648
|
-
|
|
2853
|
+
# 4. Convert the response to the structured format if needed
|
|
2854
|
+
self._convert_response_to_structured_format(run_response)
|
|
2649
2855
|
|
|
2650
|
-
|
|
2651
|
-
self.
|
|
2652
|
-
|
|
2653
|
-
run_output=run_response,
|
|
2654
|
-
session_state=session_state,
|
|
2655
|
-
dependencies=dependencies,
|
|
2656
|
-
metadata=metadata,
|
|
2657
|
-
session=session,
|
|
2658
|
-
user_id=user_id,
|
|
2659
|
-
debug_mode=debug_mode,
|
|
2660
|
-
**kwargs,
|
|
2661
|
-
)
|
|
2856
|
+
# 5. Store media if enabled
|
|
2857
|
+
if self.store_media:
|
|
2858
|
+
self._store_media(run_response, model_response)
|
|
2662
2859
|
|
|
2663
|
-
|
|
2664
|
-
|
|
2665
|
-
|
|
2666
|
-
|
|
2860
|
+
# 6. Execute post-hooks
|
|
2861
|
+
if self.post_hooks is not None:
|
|
2862
|
+
post_hook_iterator = self._execute_post_hooks(
|
|
2863
|
+
hooks=self.post_hooks, # type: ignore
|
|
2864
|
+
run_output=run_response,
|
|
2865
|
+
session=session,
|
|
2866
|
+
user_id=user_id,
|
|
2867
|
+
session_state=session_state,
|
|
2868
|
+
dependencies=dependencies,
|
|
2869
|
+
metadata=metadata,
|
|
2870
|
+
debug_mode=debug_mode,
|
|
2871
|
+
**kwargs,
|
|
2872
|
+
)
|
|
2873
|
+
deque(post_hook_iterator, maxlen=0)
|
|
2874
|
+
# Check for cancellation
|
|
2875
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
2667
2876
|
|
|
2668
|
-
|
|
2669
|
-
|
|
2670
|
-
|
|
2671
|
-
|
|
2672
|
-
session_id=session.session_id,
|
|
2673
|
-
user_id=user_id,
|
|
2674
|
-
)
|
|
2877
|
+
# 7. Create session summary
|
|
2878
|
+
if self.session_summary_manager is not None:
|
|
2879
|
+
# Upsert the RunOutput to Agent Session before creating the session summary
|
|
2880
|
+
session.upsert_run(run=run_response)
|
|
2675
2881
|
|
|
2676
|
-
|
|
2677
|
-
|
|
2882
|
+
try:
|
|
2883
|
+
self.session_summary_manager.create_session_summary(session=session)
|
|
2884
|
+
except Exception as e:
|
|
2885
|
+
log_warning(f"Error in session summary creation: {str(e)}")
|
|
2678
2886
|
|
|
2679
|
-
|
|
2680
|
-
|
|
2681
|
-
run_response=run_response,
|
|
2682
|
-
run_messages=run_messages,
|
|
2683
|
-
session=session,
|
|
2684
|
-
user_id=user_id,
|
|
2685
|
-
)
|
|
2686
|
-
# Consume the response iterator to ensure the memory is updated before the run is completed
|
|
2687
|
-
deque(response_iterator, maxlen=0)
|
|
2887
|
+
# Set the run status to completed
|
|
2888
|
+
run_response.status = RunStatus.completed
|
|
2688
2889
|
|
|
2689
|
-
|
|
2690
|
-
|
|
2890
|
+
# 8. Cleanup and store the run response and session
|
|
2891
|
+
self._cleanup_and_store(run_response=run_response, session=session, user_id=user_id)
|
|
2691
2892
|
|
|
2692
|
-
|
|
2693
|
-
|
|
2893
|
+
# Log Agent Telemetry
|
|
2894
|
+
self._log_agent_telemetry(session_id=session.session_id, run_id=run_response.run_id)
|
|
2694
2895
|
|
|
2695
|
-
|
|
2896
|
+
return run_response
|
|
2897
|
+
except RunCancelledException as e:
|
|
2898
|
+
# Handle run cancellation during async streaming
|
|
2899
|
+
log_info(f"Run {run_response.run_id} was cancelled")
|
|
2900
|
+
run_response.status = RunStatus.cancelled
|
|
2901
|
+
run_response.content = str(e)
|
|
2902
|
+
|
|
2903
|
+
# Cleanup and store the run response and session
|
|
2904
|
+
self._cleanup_and_store(run_response=run_response, session=session, user_id=user_id)
|
|
2905
|
+
|
|
2906
|
+
return run_response
|
|
2907
|
+
finally:
|
|
2908
|
+
# Always clean up the run tracking
|
|
2909
|
+
cleanup_run(run_response.run_id) # type: ignore
|
|
2696
2910
|
|
|
2697
2911
|
def _continue_run_stream(
|
|
2698
2912
|
self,
|
|
@@ -2703,7 +2917,7 @@ class Agent:
|
|
|
2703
2917
|
metadata: Optional[Dict[str, Any]] = None,
|
|
2704
2918
|
user_id: Optional[str] = None,
|
|
2705
2919
|
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
|
|
2706
|
-
|
|
2920
|
+
stream_events: bool = False,
|
|
2707
2921
|
dependencies: Optional[Dict[str, Any]] = None,
|
|
2708
2922
|
debug_mode: Optional[bool] = None,
|
|
2709
2923
|
**kwargs,
|
|
@@ -2711,100 +2925,149 @@ class Agent:
|
|
|
2711
2925
|
"""Continue a previous run.
|
|
2712
2926
|
|
|
2713
2927
|
Steps:
|
|
2714
|
-
1.
|
|
2715
|
-
2.
|
|
2716
|
-
3.
|
|
2717
|
-
4.
|
|
2718
|
-
5.
|
|
2719
|
-
6.
|
|
2720
|
-
7. Create the run completed event
|
|
2721
|
-
8. Save session to storage
|
|
2928
|
+
1. Resolve dependencies
|
|
2929
|
+
2. Handle any updated tools
|
|
2930
|
+
3. Process model response
|
|
2931
|
+
4. Execute post-hooks
|
|
2932
|
+
5. Create session summary
|
|
2933
|
+
6. Cleanup and store the run response and session
|
|
2722
2934
|
"""
|
|
2723
2935
|
|
|
2936
|
+
# 1. Resolve dependencies
|
|
2724
2937
|
if dependencies is not None:
|
|
2725
2938
|
self._resolve_run_dependencies(dependencies=dependencies)
|
|
2726
2939
|
|
|
2727
2940
|
# Start the Run by yielding a RunContinued event
|
|
2728
|
-
if
|
|
2729
|
-
yield
|
|
2730
|
-
|
|
2731
|
-
|
|
2732
|
-
|
|
2941
|
+
if stream_events:
|
|
2942
|
+
yield handle_event( # type: ignore
|
|
2943
|
+
create_run_continued_event(run_response),
|
|
2944
|
+
run_response,
|
|
2945
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
2946
|
+
store_events=self.store_events,
|
|
2947
|
+
)
|
|
2733
2948
|
|
|
2734
|
-
# 2.
|
|
2735
|
-
|
|
2736
|
-
|
|
2737
|
-
|
|
2738
|
-
run_messages=run_messages,
|
|
2739
|
-
response_format=response_format,
|
|
2740
|
-
stream_intermediate_steps=stream_intermediate_steps,
|
|
2741
|
-
):
|
|
2742
|
-
yield event
|
|
2949
|
+
# 2. Handle the updated tools
|
|
2950
|
+
yield from self._handle_tool_call_updates_stream(
|
|
2951
|
+
run_response=run_response, run_messages=run_messages, stream_events=stream_events
|
|
2952
|
+
)
|
|
2743
2953
|
|
|
2744
|
-
|
|
2745
|
-
|
|
2746
|
-
|
|
2954
|
+
try:
|
|
2955
|
+
# 3. Process model response
|
|
2956
|
+
for event in self._handle_model_response_stream(
|
|
2957
|
+
session=session,
|
|
2747
2958
|
run_response=run_response,
|
|
2748
2959
|
run_messages=run_messages,
|
|
2749
|
-
|
|
2750
|
-
|
|
2751
|
-
)
|
|
2752
|
-
|
|
2960
|
+
response_format=response_format,
|
|
2961
|
+
stream_events=stream_events,
|
|
2962
|
+
):
|
|
2963
|
+
yield event
|
|
2753
2964
|
|
|
2754
|
-
|
|
2755
|
-
self.
|
|
2756
|
-
|
|
2757
|
-
run_output=run_response,
|
|
2758
|
-
session_state=session_state,
|
|
2759
|
-
dependencies=dependencies,
|
|
2760
|
-
metadata=metadata,
|
|
2761
|
-
session=session,
|
|
2762
|
-
user_id=user_id,
|
|
2763
|
-
debug_mode=debug_mode,
|
|
2764
|
-
**kwargs,
|
|
2965
|
+
# Parse response with parser model if provided
|
|
2966
|
+
yield from self._parse_response_with_parser_model_stream(
|
|
2967
|
+
session=session, run_response=run_response, stream_events=stream_events
|
|
2765
2968
|
)
|
|
2766
2969
|
|
|
2767
|
-
|
|
2970
|
+
# Yield RunContentCompletedEvent
|
|
2971
|
+
if stream_events:
|
|
2972
|
+
yield handle_event( # type: ignore
|
|
2973
|
+
create_run_content_completed_event(from_run_response=run_response),
|
|
2974
|
+
run_response,
|
|
2975
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
2976
|
+
store_events=self.store_events,
|
|
2977
|
+
)
|
|
2768
2978
|
|
|
2769
|
-
|
|
2770
|
-
|
|
2771
|
-
|
|
2979
|
+
# We should break out of the run function
|
|
2980
|
+
if any(tool_call.is_paused for tool_call in run_response.tools or []):
|
|
2981
|
+
yield from self._handle_agent_run_paused_stream(
|
|
2982
|
+
run_response=run_response, session=session, user_id=user_id
|
|
2983
|
+
)
|
|
2984
|
+
return
|
|
2772
2985
|
|
|
2773
|
-
|
|
2774
|
-
|
|
2986
|
+
# Execute post-hooks
|
|
2987
|
+
if self.post_hooks is not None:
|
|
2988
|
+
yield from self._execute_post_hooks(
|
|
2989
|
+
hooks=self.post_hooks, # type: ignore
|
|
2990
|
+
run_output=run_response,
|
|
2991
|
+
session=session,
|
|
2992
|
+
session_state=session_state,
|
|
2993
|
+
dependencies=dependencies,
|
|
2994
|
+
metadata=metadata,
|
|
2995
|
+
user_id=user_id,
|
|
2996
|
+
debug_mode=debug_mode,
|
|
2997
|
+
**kwargs,
|
|
2998
|
+
)
|
|
2775
2999
|
|
|
2776
|
-
|
|
2777
|
-
|
|
2778
|
-
run_response=run_response,
|
|
2779
|
-
input=run_messages.user_message,
|
|
2780
|
-
session_id=session.session_id,
|
|
2781
|
-
user_id=user_id,
|
|
2782
|
-
)
|
|
3000
|
+
# Check for cancellation before model call
|
|
3001
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
2783
3002
|
|
|
2784
|
-
|
|
2785
|
-
|
|
3003
|
+
# 4. Create session summary
|
|
3004
|
+
if self.session_summary_manager is not None:
|
|
3005
|
+
# Upsert the RunOutput to Agent Session before creating the session summary
|
|
3006
|
+
session.upsert_run(run=run_response)
|
|
2786
3007
|
|
|
2787
|
-
|
|
2788
|
-
|
|
2789
|
-
|
|
2790
|
-
|
|
2791
|
-
|
|
2792
|
-
|
|
2793
|
-
|
|
3008
|
+
if stream_events:
|
|
3009
|
+
yield handle_event( # type: ignore
|
|
3010
|
+
create_session_summary_started_event(from_run_response=run_response),
|
|
3011
|
+
run_response,
|
|
3012
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3013
|
+
store_events=self.store_events,
|
|
3014
|
+
)
|
|
3015
|
+
try:
|
|
3016
|
+
self.session_summary_manager.create_session_summary(session=session)
|
|
3017
|
+
except Exception as e:
|
|
3018
|
+
log_warning(f"Error in session summary creation: {str(e)}")
|
|
2794
3019
|
|
|
2795
|
-
|
|
2796
|
-
|
|
3020
|
+
if stream_events:
|
|
3021
|
+
yield handle_event( # type: ignore
|
|
3022
|
+
create_session_summary_completed_event(
|
|
3023
|
+
from_run_response=run_response, session_summary=session.summary
|
|
3024
|
+
),
|
|
3025
|
+
run_response,
|
|
3026
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3027
|
+
store_events=self.store_events,
|
|
3028
|
+
)
|
|
2797
3029
|
|
|
2798
|
-
|
|
2799
|
-
|
|
3030
|
+
# Create the run completed event
|
|
3031
|
+
completed_event = handle_event(
|
|
3032
|
+
create_run_completed_event(run_response),
|
|
3033
|
+
run_response,
|
|
3034
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3035
|
+
store_events=self.store_events,
|
|
3036
|
+
)
|
|
2800
3037
|
|
|
2801
|
-
|
|
2802
|
-
|
|
3038
|
+
# Set the run status to completed
|
|
3039
|
+
run_response.status = RunStatus.completed
|
|
3040
|
+
|
|
3041
|
+
# 5. Cleanup and store the run response and session
|
|
3042
|
+
self._cleanup_and_store(run_response=run_response, session=session, user_id=user_id)
|
|
2803
3043
|
|
|
2804
|
-
|
|
2805
|
-
|
|
3044
|
+
if stream_events:
|
|
3045
|
+
yield completed_event # type: ignore
|
|
2806
3046
|
|
|
2807
|
-
|
|
3047
|
+
# Log Agent Telemetry
|
|
3048
|
+
self._log_agent_telemetry(session_id=session.session_id, run_id=run_response.run_id)
|
|
3049
|
+
|
|
3050
|
+
log_debug(f"Agent Run End: {run_response.run_id}", center=True, symbol="*")
|
|
3051
|
+
|
|
3052
|
+
except RunCancelledException as e:
|
|
3053
|
+
# Handle run cancellation during async streaming
|
|
3054
|
+
log_info(f"Run {run_response.run_id} was cancelled during streaming")
|
|
3055
|
+
run_response.status = RunStatus.cancelled
|
|
3056
|
+
run_response.content = str(e)
|
|
3057
|
+
|
|
3058
|
+
# Yield the cancellation event
|
|
3059
|
+
yield handle_event( # type: ignore
|
|
3060
|
+
create_run_cancelled_event(from_run_response=run_response, reason=str(e)),
|
|
3061
|
+
run_response,
|
|
3062
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3063
|
+
store_events=self.store_events,
|
|
3064
|
+
)
|
|
3065
|
+
|
|
3066
|
+
# Cleanup and store the run response and session
|
|
3067
|
+
self._cleanup_and_store(run_response=run_response, session=session, user_id=user_id)
|
|
3068
|
+
finally:
|
|
3069
|
+
# Always clean up the run tracking
|
|
3070
|
+
cleanup_run(run_response.run_id) # type: ignore
|
|
2808
3071
|
|
|
2809
3072
|
@overload
|
|
2810
3073
|
async def acontinue_run(
|
|
@@ -2812,6 +3075,7 @@ class Agent:
|
|
|
2812
3075
|
run_response: Optional[RunOutput] = None,
|
|
2813
3076
|
*,
|
|
2814
3077
|
stream: Literal[False] = False,
|
|
3078
|
+
stream_events: Optional[bool] = None,
|
|
2815
3079
|
stream_intermediate_steps: Optional[bool] = None,
|
|
2816
3080
|
run_id: Optional[str] = None,
|
|
2817
3081
|
updated_tools: Optional[List[ToolExecution]] = None,
|
|
@@ -2830,6 +3094,7 @@ class Agent:
|
|
|
2830
3094
|
run_response: Optional[RunOutput] = None,
|
|
2831
3095
|
*,
|
|
2832
3096
|
stream: Literal[True] = True,
|
|
3097
|
+
stream_events: Optional[bool] = None,
|
|
2833
3098
|
stream_intermediate_steps: Optional[bool] = None,
|
|
2834
3099
|
run_id: Optional[str] = None,
|
|
2835
3100
|
updated_tools: Optional[List[ToolExecution]] = None,
|
|
@@ -2849,6 +3114,7 @@ class Agent:
|
|
|
2849
3114
|
run_id: Optional[str] = None,
|
|
2850
3115
|
updated_tools: Optional[List[ToolExecution]] = None,
|
|
2851
3116
|
stream: Optional[bool] = None,
|
|
3117
|
+
stream_events: Optional[bool] = None,
|
|
2852
3118
|
stream_intermediate_steps: Optional[bool] = None,
|
|
2853
3119
|
user_id: Optional[str] = None,
|
|
2854
3120
|
session_id: Optional[str] = None,
|
|
@@ -2867,13 +3133,16 @@ class Agent:
|
|
|
2867
3133
|
run_id: The run id to continue. Alternative to passing run_response.
|
|
2868
3134
|
updated_tools: The updated tools to use for the run. Required to be used with `run_id`.
|
|
2869
3135
|
stream: Whether to stream the response.
|
|
2870
|
-
|
|
3136
|
+
stream_events: Whether to stream all events.
|
|
2871
3137
|
user_id: The user id to continue the run for.
|
|
2872
3138
|
session_id: The session id to continue the run for.
|
|
2873
3139
|
retries: The number of retries to continue the run for.
|
|
2874
3140
|
knowledge_filters: The knowledge filters to use for the run.
|
|
2875
3141
|
dependencies: The dependencies to use for continuing the run.
|
|
3142
|
+
metadata: The metadata to use for continuing the run.
|
|
2876
3143
|
debug_mode: Whether to enable debug mode.
|
|
3144
|
+
yield_run_response: Whether to yield the run response.
|
|
3145
|
+
(deprecated) stream_intermediate_steps: Whether to stream all steps.
|
|
2877
3146
|
"""
|
|
2878
3147
|
if run_response is None and run_id is None:
|
|
2879
3148
|
raise ValueError("Either run_response or run_id must be provided.")
|
|
@@ -2881,10 +3150,9 @@ class Agent:
|
|
|
2881
3150
|
if run_response is None and (run_id is not None and (session_id is None and self.session_id is None)):
|
|
2882
3151
|
raise ValueError("Session ID is required to continue a run from a run_id.")
|
|
2883
3152
|
|
|
2884
|
-
session_id, user_id
|
|
2885
|
-
run_id=run_id, # type: ignore
|
|
3153
|
+
session_id, user_id = self._initialize_session(
|
|
2886
3154
|
session_id=session_id,
|
|
2887
|
-
user_id=user_id,
|
|
3155
|
+
user_id=user_id,
|
|
2888
3156
|
)
|
|
2889
3157
|
|
|
2890
3158
|
# Initialize the Agent
|
|
@@ -2899,17 +3167,22 @@ class Agent:
|
|
|
2899
3167
|
if stream is None:
|
|
2900
3168
|
stream = False if self.stream is None else self.stream
|
|
2901
3169
|
|
|
2902
|
-
|
|
2903
|
-
|
|
2904
|
-
|
|
2905
|
-
|
|
3170
|
+
# Considering both stream_events and stream_intermediate_steps (deprecated)
|
|
3171
|
+
stream_events = stream_events or stream_intermediate_steps
|
|
3172
|
+
|
|
3173
|
+
# Can't stream events if streaming is disabled
|
|
3174
|
+
if stream is False:
|
|
3175
|
+
stream_events = False
|
|
3176
|
+
|
|
3177
|
+
if stream_events is None:
|
|
3178
|
+
stream_events = False if self.stream_events is None else self.stream_events
|
|
2906
3179
|
|
|
2907
3180
|
# Can't have stream_intermediate_steps if stream is False
|
|
2908
3181
|
if stream is False:
|
|
2909
|
-
|
|
3182
|
+
stream_events = False
|
|
2910
3183
|
|
|
2911
3184
|
self.stream = self.stream or stream
|
|
2912
|
-
self.
|
|
3185
|
+
self.stream_events = self.stream_events or stream_events
|
|
2913
3186
|
|
|
2914
3187
|
# Get knowledge filters
|
|
2915
3188
|
effective_filters = knowledge_filters
|
|
@@ -2936,14 +3209,13 @@ class Agent:
|
|
|
2936
3209
|
run_response=run_response,
|
|
2937
3210
|
updated_tools=updated_tools,
|
|
2938
3211
|
knowledge_filters=effective_filters,
|
|
2939
|
-
session_state=session_state,
|
|
2940
3212
|
run_id=run_id,
|
|
2941
3213
|
user_id=user_id,
|
|
2942
3214
|
session_id=session_id,
|
|
2943
3215
|
response_format=response_format,
|
|
2944
3216
|
dependencies=run_dependencies,
|
|
3217
|
+
stream_events=stream_events,
|
|
2945
3218
|
metadata=metadata,
|
|
2946
|
-
stream_intermediate_steps=stream_intermediate_steps,
|
|
2947
3219
|
yield_run_response=yield_run_response,
|
|
2948
3220
|
debug_mode=debug_mode,
|
|
2949
3221
|
**kwargs,
|
|
@@ -2954,7 +3226,6 @@ class Agent:
|
|
|
2954
3226
|
run_response=run_response,
|
|
2955
3227
|
updated_tools=updated_tools,
|
|
2956
3228
|
knowledge_filters=effective_filters,
|
|
2957
|
-
session_state=session_state,
|
|
2958
3229
|
run_id=run_id,
|
|
2959
3230
|
user_id=user_id,
|
|
2960
3231
|
response_format=response_format,
|
|
@@ -3006,7 +3277,6 @@ class Agent:
|
|
|
3006
3277
|
run_response: Optional[RunOutput] = None,
|
|
3007
3278
|
updated_tools: Optional[List[ToolExecution]] = None,
|
|
3008
3279
|
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
3009
|
-
session_state: Optional[Dict[str, Any]] = None,
|
|
3010
3280
|
run_id: Optional[str] = None,
|
|
3011
3281
|
user_id: Optional[str] = None,
|
|
3012
3282
|
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
|
|
@@ -3027,18 +3297,16 @@ class Agent:
|
|
|
3027
3297
|
7. Handle the updated tools
|
|
3028
3298
|
8. Get model response
|
|
3029
3299
|
9. Update the RunOutput with the model response
|
|
3030
|
-
10.
|
|
3031
|
-
11.
|
|
3032
|
-
12.
|
|
3033
|
-
13.
|
|
3300
|
+
10. Convert response to structured format
|
|
3301
|
+
11. Store media if enabled
|
|
3302
|
+
12. Execute post-hooks
|
|
3303
|
+
13. Create session summary
|
|
3304
|
+
14. Cleanup and store (scrub, stop timer, save to file, add to session, calculate metrics, save session)
|
|
3034
3305
|
"""
|
|
3035
3306
|
log_debug(f"Agent Run Continue: {run_response.run_id if run_response else run_id}", center=True) # type: ignore
|
|
3036
3307
|
|
|
3037
3308
|
# 1. Read existing session from db
|
|
3038
|
-
|
|
3039
|
-
agent_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)
|
|
3040
|
-
else:
|
|
3041
|
-
agent_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
|
|
3309
|
+
agent_session = await self._aread_or_create_session(session_id=session_id, user_id=user_id)
|
|
3042
3310
|
|
|
3043
3311
|
# 2. Resolve dependencies
|
|
3044
3312
|
if dependencies is not None:
|
|
@@ -3046,6 +3314,11 @@ class Agent:
|
|
|
3046
3314
|
|
|
3047
3315
|
# 3. Update metadata and session state
|
|
3048
3316
|
self._update_metadata(session=agent_session)
|
|
3317
|
+
# Initialize session state
|
|
3318
|
+
session_state = self._initialize_session_state(
|
|
3319
|
+
session_state={}, user_id=user_id, session_id=session_id, run_id=run_id
|
|
3320
|
+
)
|
|
3321
|
+
# Update session state from DB
|
|
3049
3322
|
if session_state is not None:
|
|
3050
3323
|
session_state = self._load_session_state(session=agent_session, session_state=session_state)
|
|
3051
3324
|
|
|
@@ -3120,30 +3393,24 @@ class Agent:
|
|
|
3120
3393
|
run_messages=run_messages,
|
|
3121
3394
|
)
|
|
3122
3395
|
|
|
3123
|
-
if self.store_media:
|
|
3124
|
-
self._store_media(run_response, model_response)
|
|
3125
|
-
else:
|
|
3126
|
-
self._scrub_media_from_run_output(run_response)
|
|
3127
|
-
|
|
3128
3396
|
# Break out of the run function if a tool call is paused
|
|
3129
3397
|
if any(tool_call.is_paused for tool_call in run_response.tools or []):
|
|
3130
|
-
return self.
|
|
3131
|
-
run_response=run_response,
|
|
3132
|
-
run_messages=run_messages,
|
|
3133
|
-
session=agent_session,
|
|
3134
|
-
user_id=user_id,
|
|
3398
|
+
return await self._ahandle_agent_run_paused(
|
|
3399
|
+
run_response=run_response, session=agent_session, user_id=user_id
|
|
3135
3400
|
)
|
|
3136
|
-
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3137
3401
|
|
|
3138
|
-
# 10.
|
|
3139
|
-
self._update_session_metrics(session=agent_session, run_response=run_response)
|
|
3140
|
-
|
|
3141
|
-
# Convert the response to the structured format if needed
|
|
3402
|
+
# 10. Convert the response to the structured format if needed
|
|
3142
3403
|
self._convert_response_to_structured_format(run_response)
|
|
3143
3404
|
|
|
3144
|
-
# 11.
|
|
3405
|
+
# 11. Store media if enabled
|
|
3406
|
+
if self.store_media:
|
|
3407
|
+
self._store_media(run_response, model_response)
|
|
3408
|
+
|
|
3409
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3410
|
+
|
|
3411
|
+
# 12. Execute post-hooks
|
|
3145
3412
|
if self.post_hooks is not None:
|
|
3146
|
-
|
|
3413
|
+
async for _ in self._aexecute_post_hooks(
|
|
3147
3414
|
hooks=self.post_hooks, # type: ignore
|
|
3148
3415
|
run_output=run_response,
|
|
3149
3416
|
session=agent_session,
|
|
@@ -3153,37 +3420,27 @@ class Agent:
|
|
|
3153
3420
|
dependencies=dependencies,
|
|
3154
3421
|
metadata=metadata,
|
|
3155
3422
|
**kwargs,
|
|
3156
|
-
)
|
|
3157
|
-
|
|
3158
|
-
run_response.status = RunStatus.completed
|
|
3423
|
+
):
|
|
3424
|
+
pass
|
|
3159
3425
|
|
|
3160
|
-
|
|
3161
|
-
|
|
3426
|
+
# Check for cancellation
|
|
3427
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3162
3428
|
|
|
3163
|
-
#
|
|
3164
|
-
|
|
3165
|
-
|
|
3166
|
-
|
|
3167
|
-
session=agent_session,
|
|
3168
|
-
user_id=user_id,
|
|
3169
|
-
):
|
|
3170
|
-
pass
|
|
3429
|
+
# 13. Create session summary
|
|
3430
|
+
if self.session_summary_manager is not None:
|
|
3431
|
+
# Upsert the RunOutput to Agent Session before creating the session summary
|
|
3432
|
+
agent_session.upsert_run(run=run_response)
|
|
3171
3433
|
|
|
3172
|
-
|
|
3173
|
-
|
|
3174
|
-
|
|
3175
|
-
|
|
3176
|
-
session_id=agent_session.session_id,
|
|
3177
|
-
user_id=user_id,
|
|
3178
|
-
)
|
|
3434
|
+
try:
|
|
3435
|
+
await self.session_summary_manager.acreate_session_summary(session=agent_session)
|
|
3436
|
+
except Exception as e:
|
|
3437
|
+
log_warning(f"Error in session summary creation: {str(e)}")
|
|
3179
3438
|
|
|
3180
|
-
|
|
3439
|
+
# Set the run status to completed
|
|
3440
|
+
run_response.status = RunStatus.completed
|
|
3181
3441
|
|
|
3182
|
-
#
|
|
3183
|
-
|
|
3184
|
-
await self.asave_session(session=agent_session)
|
|
3185
|
-
else:
|
|
3186
|
-
self.save_session(session=agent_session)
|
|
3442
|
+
# 14. Cleanup and store the run response and session
|
|
3443
|
+
await self._acleanup_and_store(run_response=run_response, session=agent_session, user_id=user_id)
|
|
3187
3444
|
|
|
3188
3445
|
# Log Agent Telemetry
|
|
3189
3446
|
await self._alog_agent_telemetry(session_id=agent_session.session_id, run_id=run_response.run_id)
|
|
@@ -3198,12 +3455,8 @@ class Agent:
|
|
|
3198
3455
|
run_response.content = str(e)
|
|
3199
3456
|
run_response.status = RunStatus.cancelled
|
|
3200
3457
|
|
|
3201
|
-
#
|
|
3202
|
-
|
|
3203
|
-
if self._has_async_db():
|
|
3204
|
-
await self.asave_session(session=agent_session)
|
|
3205
|
-
else:
|
|
3206
|
-
self.save_session(session=agent_session)
|
|
3458
|
+
# Cleanup and store the run response and session
|
|
3459
|
+
await self._acleanup_and_store(run_response=run_response, session=agent_session, user_id=user_id)
|
|
3207
3460
|
|
|
3208
3461
|
return run_response
|
|
3209
3462
|
finally:
|
|
@@ -3216,11 +3469,10 @@ class Agent:
|
|
|
3216
3469
|
run_response: Optional[RunOutput] = None,
|
|
3217
3470
|
updated_tools: Optional[List[ToolExecution]] = None,
|
|
3218
3471
|
knowledge_filters: Optional[Dict[str, Any]] = None,
|
|
3219
|
-
session_state: Optional[Dict[str, Any]] = None,
|
|
3220
3472
|
run_id: Optional[str] = None,
|
|
3221
3473
|
user_id: Optional[str] = None,
|
|
3222
3474
|
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
|
|
3223
|
-
|
|
3475
|
+
stream_events: bool = False,
|
|
3224
3476
|
yield_run_response: Optional[bool] = None,
|
|
3225
3477
|
dependencies: Optional[Dict[str, Any]] = None,
|
|
3226
3478
|
metadata: Optional[Dict[str, Any]] = None,
|
|
@@ -3238,12 +3490,9 @@ class Agent:
|
|
|
3238
3490
|
6. Prepare run messages
|
|
3239
3491
|
7. Handle the updated tools
|
|
3240
3492
|
8. Process model response
|
|
3241
|
-
9.
|
|
3242
|
-
10.
|
|
3243
|
-
11.
|
|
3244
|
-
12. Create the run completed event
|
|
3245
|
-
13. Add the RunOutput to Agent Session
|
|
3246
|
-
14. Save session to storage
|
|
3493
|
+
9. Create session summary
|
|
3494
|
+
10. Execute post-hooks
|
|
3495
|
+
11. Cleanup and store the run response and session
|
|
3247
3496
|
"""
|
|
3248
3497
|
log_debug(f"Agent Run Continue: {run_response.run_id if run_response else run_id}", center=True) # type: ignore
|
|
3249
3498
|
|
|
@@ -3256,6 +3505,11 @@ class Agent:
|
|
|
3256
3505
|
|
|
3257
3506
|
# 3. Update session state and metadata
|
|
3258
3507
|
self._update_metadata(session=agent_session)
|
|
3508
|
+
# Initialize session state
|
|
3509
|
+
session_state = self._initialize_session_state(
|
|
3510
|
+
session_state={}, user_id=user_id, session_id=session_id, run_id=run_id
|
|
3511
|
+
)
|
|
3512
|
+
# Update session state from DB
|
|
3259
3513
|
if session_state is not None:
|
|
3260
3514
|
session_state = self._load_session_state(session=agent_session, session_state=session_state)
|
|
3261
3515
|
|
|
@@ -3303,8 +3557,13 @@ class Agent:
|
|
|
3303
3557
|
|
|
3304
3558
|
try:
|
|
3305
3559
|
# Start the Run by yielding a RunContinued event
|
|
3306
|
-
if
|
|
3307
|
-
yield
|
|
3560
|
+
if stream_events:
|
|
3561
|
+
yield handle_event( # type: ignore
|
|
3562
|
+
create_run_continued_event(run_response),
|
|
3563
|
+
run_response,
|
|
3564
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3565
|
+
store_events=self.store_events,
|
|
3566
|
+
)
|
|
3308
3567
|
|
|
3309
3568
|
# 7. Handle the updated tools
|
|
3310
3569
|
async for event in self._ahandle_tool_call_updates_stream(
|
|
@@ -3320,7 +3579,7 @@ class Agent:
|
|
|
3320
3579
|
run_response=run_response,
|
|
3321
3580
|
run_messages=run_messages,
|
|
3322
3581
|
response_format=response_format,
|
|
3323
|
-
|
|
3582
|
+
stream_events=stream_events,
|
|
3324
3583
|
):
|
|
3325
3584
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3326
3585
|
yield event
|
|
@@ -3335,11 +3594,11 @@ class Agent:
|
|
|
3335
3594
|
run_response=run_response,
|
|
3336
3595
|
run_messages=run_messages,
|
|
3337
3596
|
response_format=response_format,
|
|
3338
|
-
|
|
3597
|
+
stream_events=stream_events,
|
|
3339
3598
|
):
|
|
3340
3599
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3341
3600
|
if isinstance(event, RunContentEvent):
|
|
3342
|
-
if
|
|
3601
|
+
if stream_events:
|
|
3343
3602
|
yield IntermediateRunContentEvent(
|
|
3344
3603
|
content=event.content,
|
|
3345
3604
|
content_type=event.content_type,
|
|
@@ -3352,7 +3611,7 @@ class Agent:
|
|
|
3352
3611
|
session=agent_session,
|
|
3353
3612
|
run_response=run_response,
|
|
3354
3613
|
run_messages=run_messages,
|
|
3355
|
-
|
|
3614
|
+
stream_events=stream_events,
|
|
3356
3615
|
):
|
|
3357
3616
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3358
3617
|
yield event
|
|
@@ -3360,71 +3619,88 @@ class Agent:
|
|
|
3360
3619
|
# Check for cancellation after model processing
|
|
3361
3620
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3362
3621
|
|
|
3622
|
+
# Parse response with parser model if provided
|
|
3623
|
+
async for event in self._aparse_response_with_parser_model_stream(
|
|
3624
|
+
session=agent_session, run_response=run_response, stream_events=stream_events
|
|
3625
|
+
):
|
|
3626
|
+
yield event
|
|
3627
|
+
|
|
3628
|
+
# Yield RunContentCompletedEvent
|
|
3629
|
+
if stream_events:
|
|
3630
|
+
yield handle_event( # type: ignore
|
|
3631
|
+
create_run_content_completed_event(from_run_response=run_response),
|
|
3632
|
+
run_response,
|
|
3633
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3634
|
+
store_events=self.store_events,
|
|
3635
|
+
)
|
|
3636
|
+
|
|
3363
3637
|
# Break out of the run function if a tool call is paused
|
|
3364
3638
|
if any(tool_call.is_paused for tool_call in run_response.tools or []):
|
|
3365
|
-
for item in self.
|
|
3366
|
-
run_response=run_response,
|
|
3367
|
-
run_messages=run_messages,
|
|
3368
|
-
session=agent_session,
|
|
3369
|
-
user_id=user_id,
|
|
3639
|
+
async for item in self._ahandle_agent_run_paused_stream(
|
|
3640
|
+
run_response=run_response, session=agent_session, user_id=user_id
|
|
3370
3641
|
):
|
|
3371
3642
|
yield item
|
|
3372
3643
|
return
|
|
3373
3644
|
|
|
3374
|
-
#
|
|
3375
|
-
completed_event = self._handle_event(create_run_completed_event(run_response), run_response)
|
|
3376
|
-
|
|
3377
|
-
# 10. Execute post-hooks
|
|
3645
|
+
# 8. Execute post-hooks
|
|
3378
3646
|
if self.post_hooks is not None:
|
|
3379
|
-
|
|
3647
|
+
async for event in self._aexecute_post_hooks(
|
|
3380
3648
|
hooks=self.post_hooks, # type: ignore
|
|
3381
3649
|
run_output=run_response,
|
|
3382
3650
|
session=agent_session,
|
|
3383
3651
|
user_id=user_id,
|
|
3384
|
-
debug_mode=debug_mode,
|
|
3385
3652
|
session_state=session_state,
|
|
3386
3653
|
dependencies=dependencies,
|
|
3387
3654
|
metadata=metadata,
|
|
3655
|
+
debug_mode=debug_mode,
|
|
3388
3656
|
**kwargs,
|
|
3389
|
-
)
|
|
3390
|
-
|
|
3391
|
-
|
|
3657
|
+
):
|
|
3658
|
+
yield event
|
|
3659
|
+
# Check for cancellation before model call
|
|
3660
|
+
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
3392
3661
|
|
|
3393
|
-
#
|
|
3394
|
-
if
|
|
3395
|
-
|
|
3662
|
+
# 9. Create session summary
|
|
3663
|
+
if self.session_summary_manager is not None:
|
|
3664
|
+
# Upsert the RunOutput to Agent Session before creating the session summary
|
|
3665
|
+
agent_session.upsert_run(run=run_response)
|
|
3396
3666
|
|
|
3397
|
-
|
|
3398
|
-
|
|
3667
|
+
if stream_events:
|
|
3668
|
+
yield handle_event( # type: ignore
|
|
3669
|
+
create_session_summary_started_event(from_run_response=run_response),
|
|
3670
|
+
run_response,
|
|
3671
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3672
|
+
store_events=self.store_events,
|
|
3673
|
+
)
|
|
3674
|
+
try:
|
|
3675
|
+
await self.session_summary_manager.acreate_session_summary(session=agent_session)
|
|
3676
|
+
except Exception as e:
|
|
3677
|
+
log_warning(f"Error in session summary creation: {str(e)}")
|
|
3678
|
+
if stream_events:
|
|
3679
|
+
yield handle_event( # type: ignore
|
|
3680
|
+
create_session_summary_completed_event(
|
|
3681
|
+
from_run_response=run_response, session_summary=agent_session.summary
|
|
3682
|
+
),
|
|
3683
|
+
run_response,
|
|
3684
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3685
|
+
store_events=self.store_events,
|
|
3686
|
+
)
|
|
3399
3687
|
|
|
3400
|
-
#
|
|
3401
|
-
|
|
3402
|
-
run_response
|
|
3403
|
-
|
|
3404
|
-
|
|
3405
|
-
|
|
3688
|
+
# Create the run completed event
|
|
3689
|
+
completed_event = handle_event(
|
|
3690
|
+
create_run_completed_event(run_response),
|
|
3691
|
+
run_response,
|
|
3692
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3693
|
+
store_events=self.store_events,
|
|
3406
3694
|
)
|
|
3407
3695
|
|
|
3408
|
-
#
|
|
3409
|
-
|
|
3410
|
-
|
|
3411
|
-
# 12. Update Agent Memory
|
|
3412
|
-
async for event in self._amake_memories_cultural_knowledge_and_summaries(
|
|
3413
|
-
run_response=run_response,
|
|
3414
|
-
run_messages=run_messages,
|
|
3415
|
-
session=agent_session,
|
|
3416
|
-
user_id=user_id,
|
|
3417
|
-
):
|
|
3418
|
-
yield event
|
|
3696
|
+
# Set the run status to completed
|
|
3697
|
+
run_response.status = RunStatus.completed
|
|
3419
3698
|
|
|
3420
|
-
#
|
|
3421
|
-
|
|
3422
|
-
await self.asave_session(session=agent_session)
|
|
3423
|
-
else:
|
|
3424
|
-
self.save_session(session=agent_session)
|
|
3699
|
+
# 10. Cleanup and store the run response and session
|
|
3700
|
+
await self._acleanup_and_store(run_response=run_response, session=agent_session, user_id=user_id)
|
|
3425
3701
|
|
|
3426
|
-
if
|
|
3427
|
-
yield completed_event
|
|
3702
|
+
if stream_events:
|
|
3703
|
+
yield completed_event # type: ignore
|
|
3428
3704
|
|
|
3429
3705
|
if yield_run_response:
|
|
3430
3706
|
yield run_response
|
|
@@ -3440,17 +3716,15 @@ class Agent:
|
|
|
3440
3716
|
run_response.content = str(e)
|
|
3441
3717
|
|
|
3442
3718
|
# Yield the cancellation event
|
|
3443
|
-
yield
|
|
3719
|
+
yield handle_event( # type: ignore
|
|
3444
3720
|
create_run_cancelled_event(from_run_response=run_response, reason=str(e)),
|
|
3445
3721
|
run_response,
|
|
3722
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3723
|
+
store_events=self.store_events,
|
|
3446
3724
|
)
|
|
3447
3725
|
|
|
3448
|
-
#
|
|
3449
|
-
|
|
3450
|
-
if self._has_async_db():
|
|
3451
|
-
await self.asave_session(session=agent_session)
|
|
3452
|
-
else:
|
|
3453
|
-
self.save_session(session=agent_session)
|
|
3726
|
+
# Cleanup and store the run response and session
|
|
3727
|
+
await self._acleanup_and_store(run_response=run_response, session=agent_session, user_id=user_id)
|
|
3454
3728
|
finally:
|
|
3455
3729
|
# Always clean up the run tracking
|
|
3456
3730
|
cleanup_run(run_response.run_id) # type: ignore
|
|
@@ -3486,13 +3760,15 @@ class Agent:
|
|
|
3486
3760
|
all_args.update(kwargs)
|
|
3487
3761
|
|
|
3488
3762
|
for i, hook in enumerate(hooks):
|
|
3489
|
-
yield
|
|
3763
|
+
yield handle_event( # type: ignore
|
|
3490
3764
|
run_response=run_response,
|
|
3491
3765
|
event=create_pre_hook_started_event(
|
|
3492
3766
|
from_run_response=run_response,
|
|
3493
3767
|
run_input=run_input,
|
|
3494
3768
|
pre_hook_name=hook.__name__,
|
|
3495
3769
|
),
|
|
3770
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3771
|
+
store_events=self.store_events,
|
|
3496
3772
|
)
|
|
3497
3773
|
try:
|
|
3498
3774
|
# Filter arguments to only include those that the hook accepts
|
|
@@ -3500,13 +3776,15 @@ class Agent:
|
|
|
3500
3776
|
|
|
3501
3777
|
hook(**filtered_args)
|
|
3502
3778
|
|
|
3503
|
-
yield
|
|
3779
|
+
yield handle_event( # type: ignore
|
|
3504
3780
|
run_response=run_response,
|
|
3505
3781
|
event=create_pre_hook_completed_event(
|
|
3506
3782
|
from_run_response=run_response,
|
|
3507
3783
|
run_input=run_input,
|
|
3508
3784
|
pre_hook_name=hook.__name__,
|
|
3509
3785
|
),
|
|
3786
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3787
|
+
store_events=self.store_events,
|
|
3510
3788
|
)
|
|
3511
3789
|
|
|
3512
3790
|
except (InputCheckError, OutputCheckError) as e:
|
|
@@ -3552,13 +3830,15 @@ class Agent:
|
|
|
3552
3830
|
all_args.update(kwargs)
|
|
3553
3831
|
|
|
3554
3832
|
for i, hook in enumerate(hooks):
|
|
3555
|
-
yield
|
|
3833
|
+
yield handle_event( # type: ignore
|
|
3556
3834
|
run_response=run_response,
|
|
3557
3835
|
event=create_pre_hook_started_event(
|
|
3558
3836
|
from_run_response=run_response,
|
|
3559
3837
|
run_input=run_input,
|
|
3560
3838
|
pre_hook_name=hook.__name__,
|
|
3561
3839
|
),
|
|
3840
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3841
|
+
store_events=self.store_events,
|
|
3562
3842
|
)
|
|
3563
3843
|
try:
|
|
3564
3844
|
# Filter arguments to only include those that the hook accepts
|
|
@@ -3570,13 +3850,15 @@ class Agent:
|
|
|
3570
3850
|
# Synchronous function
|
|
3571
3851
|
hook(**filtered_args)
|
|
3572
3852
|
|
|
3573
|
-
yield
|
|
3853
|
+
yield handle_event( # type: ignore
|
|
3574
3854
|
run_response=run_response,
|
|
3575
3855
|
event=create_pre_hook_completed_event(
|
|
3576
3856
|
from_run_response=run_response,
|
|
3577
3857
|
run_input=run_input,
|
|
3578
3858
|
pre_hook_name=hook.__name__,
|
|
3579
3859
|
),
|
|
3860
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3861
|
+
store_events=self.store_events,
|
|
3580
3862
|
)
|
|
3581
3863
|
|
|
3582
3864
|
except (InputCheckError, OutputCheckError) as e:
|
|
@@ -3602,7 +3884,7 @@ class Agent:
|
|
|
3602
3884
|
user_id: Optional[str] = None,
|
|
3603
3885
|
debug_mode: Optional[bool] = None,
|
|
3604
3886
|
**kwargs: Any,
|
|
3605
|
-
) ->
|
|
3887
|
+
) -> Iterator[RunOutputEvent]:
|
|
3606
3888
|
"""Execute multiple post-hook functions in succession."""
|
|
3607
3889
|
if hooks is None:
|
|
3608
3890
|
return
|
|
@@ -3621,11 +3903,30 @@ class Agent:
|
|
|
3621
3903
|
all_args.update(kwargs)
|
|
3622
3904
|
|
|
3623
3905
|
for i, hook in enumerate(hooks):
|
|
3906
|
+
yield handle_event( # type: ignore
|
|
3907
|
+
run_response=run_output,
|
|
3908
|
+
event=create_post_hook_started_event(
|
|
3909
|
+
from_run_response=run_output,
|
|
3910
|
+
post_hook_name=hook.__name__,
|
|
3911
|
+
),
|
|
3912
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3913
|
+
store_events=self.store_events,
|
|
3914
|
+
)
|
|
3624
3915
|
try:
|
|
3625
3916
|
# Filter arguments to only include those that the hook accepts
|
|
3626
3917
|
filtered_args = filter_hook_args(hook, all_args)
|
|
3627
3918
|
|
|
3628
3919
|
hook(**filtered_args)
|
|
3920
|
+
|
|
3921
|
+
yield handle_event( # type: ignore
|
|
3922
|
+
run_response=run_output,
|
|
3923
|
+
event=create_post_hook_completed_event(
|
|
3924
|
+
from_run_response=run_output,
|
|
3925
|
+
post_hook_name=hook.__name__,
|
|
3926
|
+
),
|
|
3927
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3928
|
+
store_events=self.store_events,
|
|
3929
|
+
)
|
|
3629
3930
|
except (InputCheckError, OutputCheckError) as e:
|
|
3630
3931
|
raise e
|
|
3631
3932
|
except Exception as e:
|
|
@@ -3646,7 +3947,7 @@ class Agent:
|
|
|
3646
3947
|
user_id: Optional[str] = None,
|
|
3647
3948
|
debug_mode: Optional[bool] = None,
|
|
3648
3949
|
**kwargs: Any,
|
|
3649
|
-
) ->
|
|
3950
|
+
) -> AsyncIterator[RunOutputEvent]:
|
|
3650
3951
|
"""Execute multiple post-hook functions in succession (async version)."""
|
|
3651
3952
|
if hooks is None:
|
|
3652
3953
|
return
|
|
@@ -3665,6 +3966,15 @@ class Agent:
|
|
|
3665
3966
|
all_args.update(kwargs)
|
|
3666
3967
|
|
|
3667
3968
|
for i, hook in enumerate(hooks):
|
|
3969
|
+
yield handle_event( # type: ignore
|
|
3970
|
+
run_response=run_output,
|
|
3971
|
+
event=create_post_hook_started_event(
|
|
3972
|
+
from_run_response=run_output,
|
|
3973
|
+
post_hook_name=hook.__name__,
|
|
3974
|
+
),
|
|
3975
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3976
|
+
store_events=self.store_events,
|
|
3977
|
+
)
|
|
3668
3978
|
try:
|
|
3669
3979
|
# Filter arguments to only include those that the hook accepts
|
|
3670
3980
|
filtered_args = filter_hook_args(hook, all_args)
|
|
@@ -3674,6 +3984,16 @@ class Agent:
|
|
|
3674
3984
|
else:
|
|
3675
3985
|
hook(**filtered_args)
|
|
3676
3986
|
|
|
3987
|
+
yield handle_event( # type: ignore
|
|
3988
|
+
run_response=run_output,
|
|
3989
|
+
event=create_post_hook_completed_event(
|
|
3990
|
+
from_run_response=run_output,
|
|
3991
|
+
post_hook_name=hook.__name__,
|
|
3992
|
+
),
|
|
3993
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
3994
|
+
store_events=self.store_events,
|
|
3995
|
+
)
|
|
3996
|
+
|
|
3677
3997
|
except (InputCheckError, OutputCheckError) as e:
|
|
3678
3998
|
raise e
|
|
3679
3999
|
except Exception as e:
|
|
@@ -3686,7 +4006,6 @@ class Agent:
|
|
|
3686
4006
|
def _handle_agent_run_paused(
|
|
3687
4007
|
self,
|
|
3688
4008
|
run_response: RunOutput,
|
|
3689
|
-
run_messages: RunMessages,
|
|
3690
4009
|
session: AgentSession,
|
|
3691
4010
|
user_id: Optional[str] = None,
|
|
3692
4011
|
) -> RunOutput:
|
|
@@ -3696,18 +4015,7 @@ class Agent:
|
|
|
3696
4015
|
if not run_response.content:
|
|
3697
4016
|
run_response.content = get_paused_content(run_response)
|
|
3698
4017
|
|
|
3699
|
-
|
|
3700
|
-
self.save_run_response_to_file(
|
|
3701
|
-
run_response=run_response,
|
|
3702
|
-
input=run_messages.user_message,
|
|
3703
|
-
session_id=session.session_id,
|
|
3704
|
-
user_id=user_id,
|
|
3705
|
-
)
|
|
3706
|
-
|
|
3707
|
-
session.upsert_run(run=run_response)
|
|
3708
|
-
|
|
3709
|
-
# Save session to storage
|
|
3710
|
-
self.save_session(session=session)
|
|
4018
|
+
self._cleanup_and_store(run_response=run_response, session=session, user_id=user_id)
|
|
3711
4019
|
|
|
3712
4020
|
log_debug(f"Agent Run Paused: {run_response.run_id}", center=True, symbol="*")
|
|
3713
4021
|
|
|
@@ -3717,7 +4025,6 @@ class Agent:
|
|
|
3717
4025
|
def _handle_agent_run_paused_stream(
|
|
3718
4026
|
self,
|
|
3719
4027
|
run_response: RunOutput,
|
|
3720
|
-
run_messages: RunMessages,
|
|
3721
4028
|
session: AgentSession,
|
|
3722
4029
|
user_id: Optional[str] = None,
|
|
3723
4030
|
) -> Iterator[RunOutputEvent]:
|
|
@@ -3728,26 +4035,67 @@ class Agent:
|
|
|
3728
4035
|
run_response.content = get_paused_content(run_response)
|
|
3729
4036
|
|
|
3730
4037
|
# We return and await confirmation/completion for the tools that require it
|
|
3731
|
-
pause_event =
|
|
4038
|
+
pause_event = handle_event(
|
|
3732
4039
|
create_run_paused_event(
|
|
3733
4040
|
from_run_response=run_response,
|
|
3734
4041
|
tools=run_response.tools,
|
|
3735
4042
|
),
|
|
3736
4043
|
run_response,
|
|
4044
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
4045
|
+
store_events=self.store_events,
|
|
3737
4046
|
)
|
|
3738
4047
|
|
|
3739
|
-
|
|
3740
|
-
|
|
3741
|
-
|
|
3742
|
-
|
|
3743
|
-
|
|
3744
|
-
|
|
4048
|
+
self._cleanup_and_store(run_response=run_response, session=session, user_id=user_id)
|
|
4049
|
+
|
|
4050
|
+
yield pause_event # type: ignore
|
|
4051
|
+
|
|
4052
|
+
log_debug(f"Agent Run Paused: {run_response.run_id}", center=True, symbol="*")
|
|
4053
|
+
|
|
4054
|
+
async def _ahandle_agent_run_paused(
|
|
4055
|
+
self,
|
|
4056
|
+
run_response: RunOutput,
|
|
4057
|
+
session: AgentSession,
|
|
4058
|
+
user_id: Optional[str] = None,
|
|
4059
|
+
) -> RunOutput:
|
|
4060
|
+
# Set the run response to paused
|
|
4061
|
+
|
|
4062
|
+
run_response.status = RunStatus.paused
|
|
4063
|
+
if not run_response.content:
|
|
4064
|
+
run_response.content = get_paused_content(run_response)
|
|
4065
|
+
|
|
4066
|
+
await self._acleanup_and_store(run_response=run_response, session=session, user_id=user_id)
|
|
4067
|
+
|
|
4068
|
+
log_debug(f"Agent Run Paused: {run_response.run_id}", center=True, symbol="*")
|
|
4069
|
+
|
|
4070
|
+
# We return and await confirmation/completion for the tools that require it
|
|
4071
|
+
return run_response
|
|
4072
|
+
|
|
4073
|
+
async def _ahandle_agent_run_paused_stream(
|
|
4074
|
+
self,
|
|
4075
|
+
run_response: RunOutput,
|
|
4076
|
+
session: AgentSession,
|
|
4077
|
+
user_id: Optional[str] = None,
|
|
4078
|
+
) -> AsyncIterator[RunOutputEvent]:
|
|
4079
|
+
# Set the run response to paused
|
|
4080
|
+
|
|
4081
|
+
run_response.status = RunStatus.paused
|
|
4082
|
+
if not run_response.content:
|
|
4083
|
+
run_response.content = get_paused_content(run_response)
|
|
4084
|
+
|
|
4085
|
+
# We return and await confirmation/completion for the tools that require it
|
|
4086
|
+
pause_event = handle_event(
|
|
4087
|
+
create_run_paused_event(
|
|
4088
|
+
from_run_response=run_response,
|
|
4089
|
+
tools=run_response.tools,
|
|
4090
|
+
),
|
|
4091
|
+
run_response,
|
|
4092
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
4093
|
+
store_events=self.store_events,
|
|
3745
4094
|
)
|
|
3746
|
-
session.upsert_run(run=run_response)
|
|
3747
|
-
# Save session to storage
|
|
3748
|
-
self.save_session(session=session)
|
|
3749
4095
|
|
|
3750
|
-
|
|
4096
|
+
await self._acleanup_and_store(run_response=run_response, session=session, user_id=user_id)
|
|
4097
|
+
|
|
4098
|
+
yield pause_event # type: ignore
|
|
3751
4099
|
|
|
3752
4100
|
log_debug(f"Agent Run Paused: {run_response.run_id}", center=True, symbol="*")
|
|
3753
4101
|
|
|
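Note: the pause handlers above (including the new async `_ahandle_agent_run_paused` / `_ahandle_agent_run_paused_stream`) mark the run as `RunStatus.paused`, persist the session via the cleanup-and-store helpers, and in the streaming case emit a run-paused event. A rough sketch of what a caller might do with a paused run, using toy dataclasses rather than the actual agno types (only `status`, `requires_confirmation`, and `confirmed` are taken from the diff; everything else is illustrative):

```python
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional

class RunStatus(str, Enum):
    running = "running"
    paused = "paused"
    completed = "completed"

@dataclass
class ToolExecution:
    name: str
    requires_confirmation: bool = False
    confirmed: Optional[bool] = None

@dataclass
class RunOutput:
    run_id: str
    status: RunStatus = RunStatus.running
    content: Optional[str] = None
    tools: List[ToolExecution] = field(default_factory=list)

def handle_possible_pause(run: RunOutput) -> None:
    # A paused run keeps the tools that still need confirmation,
    # so the caller can confirm them and resume the run later.
    if run.status == RunStatus.paused:
        pending = [t.name for t in run.tools if t.requires_confirmation and t.confirmed is None]
        print(f"Run {run.run_id} paused, awaiting confirmation for: {pending}")

if __name__ == "__main__":
    run = RunOutput(
        run_id="run-1",
        status=RunStatus.paused,
        tools=[ToolExecution(name="delete_file", requires_confirmation=True)],
    )
    handle_possible_pause(run)
```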
@@ -3824,7 +4172,11 @@ class Agent:
|
|
|
3824
4172
|
)
|
|
3825
4173
|
|
|
3826
4174
|
def _run_tool(
|
|
3827
|
-
self,
|
|
4175
|
+
self,
|
|
4176
|
+
run_response: RunOutput,
|
|
4177
|
+
run_messages: RunMessages,
|
|
4178
|
+
tool: ToolExecution,
|
|
4179
|
+
stream_events: bool = False,
|
|
3828
4180
|
) -> Iterator[RunOutputEvent]:
|
|
3829
4181
|
self.model = cast(Model, self.model)
|
|
3830
4182
|
# Execute the tool
|
|
@@ -3837,23 +4189,27 @@ class Agent:
|
|
|
3837
4189
|
):
|
|
3838
4190
|
if isinstance(call_result, ModelResponse):
|
|
3839
4191
|
if call_result.event == ModelResponseEvent.tool_call_started.value:
|
|
3840
|
-
|
|
3841
|
-
|
|
3842
|
-
|
|
3843
|
-
|
|
4192
|
+
if stream_events:
|
|
4193
|
+
yield handle_event( # type: ignore
|
|
4194
|
+
create_tool_call_started_event(from_run_response=run_response, tool=tool),
|
|
4195
|
+
run_response,
|
|
4196
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
4197
|
+
store_events=self.store_events,
|
|
4198
|
+
)
|
|
3844
4199
|
|
|
3845
4200
|
if call_result.event == ModelResponseEvent.tool_call_completed.value and call_result.tool_executions:
|
|
3846
4201
|
tool_execution = call_result.tool_executions[0]
|
|
3847
4202
|
tool.result = tool_execution.result
|
|
3848
4203
|
tool.tool_call_error = tool_execution.tool_call_error
|
|
3849
|
-
|
|
3850
|
-
|
|
3851
|
-
|
|
3852
|
-
|
|
3853
|
-
|
|
3854
|
-
|
|
3855
|
-
|
|
3856
|
-
|
|
4204
|
+
if stream_events:
|
|
4205
|
+
yield handle_event( # type: ignore
|
|
4206
|
+
create_tool_call_completed_event(
|
|
4207
|
+
from_run_response=run_response, tool=tool, content=call_result.content
|
|
4208
|
+
),
|
|
4209
|
+
run_response,
|
|
4210
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
4211
|
+
store_events=self.store_events,
|
|
4212
|
+
)
|
|
3857
4213
|
|
|
3858
4214
|
if len(function_call_results) > 0:
|
|
3859
4215
|
run_messages.messages.extend(function_call_results)
|
|
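Note: `_run_tool` (and `_arun_tool` below) now gates tool-call started/completed events behind an explicit `stream_events` flag instead of an agent-level attribute. The generator shape of that change, reduced to plain dictionaries in place of the real event objects:

```python
from collections import deque
from typing import Any, Dict, Iterator

def run_tool(tool_name: str, stream_events: bool = False) -> Iterator[Dict[str, Any]]:
    """Execute a (fake) tool, yielding start/complete events only when streaming is requested."""
    if stream_events:
        yield {"event": "ToolCallStarted", "tool": tool_name}
    result = f"{tool_name} finished"  # stand-in for the real function call
    if stream_events:
        yield {"event": "ToolCallCompleted", "tool": tool_name, "content": result}

if __name__ == "__main__":
    # With stream_events=False the generator is consumed purely for its side effects,
    # mirroring the deque(..., maxlen=0) pattern used in the non-streaming path.
    deque(run_tool("search", stream_events=False), maxlen=0)
    for event in run_tool("search", stream_events=True):
        print(event)
```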
@@ -3873,6 +4229,7 @@ class Agent:
|
|
|
3873
4229
|
run_response: RunOutput,
|
|
3874
4230
|
run_messages: RunMessages,
|
|
3875
4231
|
tool: ToolExecution,
|
|
4232
|
+
stream_events: bool = False,
|
|
3876
4233
|
) -> AsyncIterator[RunOutputEvent]:
|
|
3877
4234
|
self.model = cast(Model, self.model)
|
|
3878
4235
|
|
|
@@ -3887,22 +4244,26 @@ class Agent:
|
|
|
3887
4244
|
):
|
|
3888
4245
|
if isinstance(call_result, ModelResponse):
|
|
3889
4246
|
if call_result.event == ModelResponseEvent.tool_call_started.value:
|
|
3890
|
-
|
|
3891
|
-
|
|
3892
|
-
|
|
3893
|
-
|
|
4247
|
+
if stream_events:
|
|
4248
|
+
yield handle_event( # type: ignore
|
|
4249
|
+
create_tool_call_started_event(from_run_response=run_response, tool=tool),
|
|
4250
|
+
run_response,
|
|
4251
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
4252
|
+
store_events=self.store_events,
|
|
4253
|
+
)
|
|
3894
4254
|
if call_result.event == ModelResponseEvent.tool_call_completed.value and call_result.tool_executions:
|
|
3895
4255
|
tool_execution = call_result.tool_executions[0]
|
|
3896
4256
|
tool.result = tool_execution.result
|
|
3897
4257
|
tool.tool_call_error = tool_execution.tool_call_error
|
|
3898
|
-
|
|
3899
|
-
|
|
3900
|
-
|
|
3901
|
-
|
|
3902
|
-
|
|
3903
|
-
|
|
3904
|
-
|
|
3905
|
-
|
|
4258
|
+
if stream_events:
|
|
4259
|
+
yield handle_event( # type: ignore
|
|
4260
|
+
create_tool_call_completed_event(
|
|
4261
|
+
from_run_response=run_response, tool=tool, content=call_result.content
|
|
4262
|
+
),
|
|
4263
|
+
run_response,
|
|
4264
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
4265
|
+
store_events=self.store_events,
|
|
4266
|
+
)
|
|
3906
4267
|
if len(function_call_results) > 0:
|
|
3907
4268
|
run_messages.messages.extend(function_call_results)
|
|
3908
4269
|
|
|
@@ -3944,7 +4305,7 @@ class Agent:
|
|
|
3944
4305
|
deque(self._run_tool(run_response, run_messages, _t), maxlen=0)
|
|
3945
4306
|
|
|
3946
4307
|
def _handle_tool_call_updates_stream(
|
|
3947
|
-
self, run_response: RunOutput, run_messages: RunMessages
|
|
4308
|
+
self, run_response: RunOutput, run_messages: RunMessages, stream_events: bool = False
|
|
3948
4309
|
) -> Iterator[RunOutputEvent]:
|
|
3949
4310
|
self.model = cast(Model, self.model)
|
|
3950
4311
|
for _t in run_response.tools or []:
|
|
@@ -3952,7 +4313,7 @@ class Agent:
|
|
|
3952
4313
|
if _t.requires_confirmation is not None and _t.requires_confirmation is True and self._functions_for_model:
|
|
3953
4314
|
# Tool is confirmed and hasn't been run before
|
|
3954
4315
|
if _t.confirmed is not None and _t.confirmed is True and _t.result is None:
|
|
3955
|
-
yield from self._run_tool(run_response, run_messages, _t)
|
|
4316
|
+
yield from self._run_tool(run_response, run_messages, _t, stream_events=stream_events)
|
|
3956
4317
|
else:
|
|
3957
4318
|
self._reject_tool_call(run_messages, _t)
|
|
3958
4319
|
_t.confirmed = False
|
|
@@ -3977,7 +4338,7 @@ class Agent:
|
|
|
3977
4338
|
# Case 4: Handle user input required tools
|
|
3978
4339
|
elif _t.requires_user_input is not None and _t.requires_user_input is True:
|
|
3979
4340
|
self._handle_user_input_update(tool=_t)
|
|
3980
|
-
yield from self._run_tool(run_response, run_messages, _t)
|
|
4341
|
+
yield from self._run_tool(run_response, run_messages, _t, stream_events=stream_events)
|
|
3981
4342
|
_t.requires_user_input = False
|
|
3982
4343
|
_t.answered = True
|
|
3983
4344
|
|
|
@@ -4018,7 +4379,7 @@ class Agent:
|
|
|
4018
4379
|
_t.answered = True
|
|
4019
4380
|
|
|
4020
4381
|
async def _ahandle_tool_call_updates_stream(
|
|
4021
|
-
self, run_response: RunOutput, run_messages: RunMessages
|
|
4382
|
+
self, run_response: RunOutput, run_messages: RunMessages, stream_events: bool = False
|
|
4022
4383
|
) -> AsyncIterator[RunOutputEvent]:
|
|
4023
4384
|
self.model = cast(Model, self.model)
|
|
4024
4385
|
for _t in run_response.tools or []:
|
|
@@ -4026,7 +4387,7 @@ class Agent:
|
|
|
4026
4387
|
if _t.requires_confirmation is not None and _t.requires_confirmation is True and self._functions_for_model:
|
|
4027
4388
|
# Tool is confirmed and hasn't been run before
|
|
4028
4389
|
if _t.confirmed is not None and _t.confirmed is True and _t.result is None:
|
|
4029
|
-
async for event in self._arun_tool(run_response, run_messages, _t):
|
|
4390
|
+
async for event in self._arun_tool(run_response, run_messages, _t, stream_events=stream_events):
|
|
4030
4391
|
yield event
|
|
4031
4392
|
else:
|
|
4032
4393
|
self._reject_tool_call(run_messages, _t)
|
|
@@ -4050,7 +4411,7 @@ class Agent:
|
|
|
4050
4411
|
# # Case 4: Handle user input required tools
|
|
4051
4412
|
elif _t.requires_user_input is not None and _t.requires_user_input is True:
|
|
4052
4413
|
self._handle_user_input_update(tool=_t)
|
|
4053
|
-
async for event in self._arun_tool(run_response, run_messages, _t):
|
|
4414
|
+
async for event in self._arun_tool(run_response, run_messages, _t, stream_events=stream_events):
|
|
4054
4415
|
yield event
|
|
4055
4416
|
_t.requires_user_input = False
|
|
4056
4417
|
_t.answered = True
|
|
@@ -4157,7 +4518,7 @@ class Agent:
|
|
|
4157
4518
|
run_response: RunOutput,
|
|
4158
4519
|
run_messages: RunMessages,
|
|
4159
4520
|
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
|
|
4160
|
-
|
|
4521
|
+
stream_events: bool = False,
|
|
4161
4522
|
) -> Iterator[RunOutputEvent]:
|
|
4162
4523
|
self.model = cast(Model, self.model)
|
|
4163
4524
|
|
|
@@ -4190,11 +4551,11 @@ class Agent:
|
|
|
4190
4551
|
model_response_event=model_response_event,
|
|
4191
4552
|
reasoning_state=reasoning_state,
|
|
4192
4553
|
parse_structured_output=self.should_parse_structured_output,
|
|
4193
|
-
|
|
4554
|
+
stream_events=stream_events,
|
|
4194
4555
|
)
|
|
4195
4556
|
|
|
4196
4557
|
# Determine reasoning completed
|
|
4197
|
-
if
|
|
4558
|
+
if stream_events and reasoning_state["reasoning_started"]:
|
|
4198
4559
|
all_reasoning_steps: List[ReasoningStep] = []
|
|
4199
4560
|
if run_response and run_response.reasoning_steps:
|
|
4200
4561
|
all_reasoning_steps = cast(List[ReasoningStep], run_response.reasoning_steps)
|
|
@@ -4204,13 +4565,15 @@ class Agent:
|
|
|
4204
4565
|
run_response=run_response,
|
|
4205
4566
|
reasoning_time_taken=reasoning_state["reasoning_time_taken"],
|
|
4206
4567
|
)
|
|
4207
|
-
yield
|
|
4568
|
+
yield handle_event( # type: ignore
|
|
4208
4569
|
create_reasoning_completed_event(
|
|
4209
4570
|
from_run_response=run_response,
|
|
4210
4571
|
content=ReasoningSteps(reasoning_steps=all_reasoning_steps),
|
|
4211
4572
|
content_type=ReasoningSteps.__name__,
|
|
4212
4573
|
),
|
|
4213
4574
|
run_response,
|
|
4575
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
4576
|
+
store_events=self.store_events,
|
|
4214
4577
|
)
|
|
4215
4578
|
|
|
4216
4579
|
# Update RunOutput
|
|
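Note: across these hunks the agent-bound event handler is replaced with calls to the module-level `handle_event` helper (imported from `agno.utils.events` per the import hunk), which now receives `events_to_skip` and `store_events` explicitly. The sketch below is one plausible reading of those parameters, not the agno implementation: skipped events are still returned to be yielded, but only non-skipped events are persisted on the run output when storage is enabled.

```python
from typing import Any, Dict, List, Optional

def handle_event(
    event: Dict[str, Any],
    run_response: Dict[str, Any],
    events_to_skip: Optional[List[str]] = None,
    store_events: bool = False,
) -> Dict[str, Any]:
    """Illustrative stand-in: optionally store an event on the run output, then hand it back."""
    skip = events_to_skip or []
    if store_events and event.get("event") not in skip:
        run_response.setdefault("events", []).append(event)
    return event

if __name__ == "__main__":
    run: Dict[str, Any] = {"run_id": "run-1"}
    handle_event({"event": "RunContent", "content": "hi"}, run,
                 events_to_skip=["RunContent"], store_events=True)
    handle_event({"event": "ToolCallStarted", "tool": "search"}, run, store_events=True)
    print(run.get("events"))  # only the ToolCallStarted event was stored
```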
@@ -4233,7 +4596,7 @@ class Agent:
|
|
|
4233
4596
|
run_response: RunOutput,
|
|
4234
4597
|
run_messages: RunMessages,
|
|
4235
4598
|
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
|
|
4236
|
-
|
|
4599
|
+
stream_events: bool = False,
|
|
4237
4600
|
) -> AsyncIterator[RunOutputEvent]:
|
|
4238
4601
|
self.model = cast(Model, self.model)
|
|
4239
4602
|
|
|
@@ -4268,11 +4631,11 @@ class Agent:
|
|
|
4268
4631
|
model_response_event=model_response_event,
|
|
4269
4632
|
reasoning_state=reasoning_state,
|
|
4270
4633
|
parse_structured_output=self.should_parse_structured_output,
|
|
4271
|
-
|
|
4634
|
+
stream_events=stream_events,
|
|
4272
4635
|
):
|
|
4273
4636
|
yield event
|
|
4274
4637
|
|
|
4275
|
-
if
|
|
4638
|
+
if stream_events and reasoning_state["reasoning_started"]:
|
|
4276
4639
|
all_reasoning_steps: List[ReasoningStep] = []
|
|
4277
4640
|
if run_response and run_response.reasoning_steps:
|
|
4278
4641
|
all_reasoning_steps = cast(List[ReasoningStep], run_response.reasoning_steps)
|
|
@@ -4282,13 +4645,15 @@ class Agent:
|
|
|
4282
4645
|
run_response=run_response,
|
|
4283
4646
|
reasoning_time_taken=reasoning_state["reasoning_time_taken"],
|
|
4284
4647
|
)
|
|
4285
|
-
yield
|
|
4648
|
+
yield handle_event( # type: ignore
|
|
4286
4649
|
create_reasoning_completed_event(
|
|
4287
4650
|
from_run_response=run_response,
|
|
4288
4651
|
content=ReasoningSteps(reasoning_steps=all_reasoning_steps),
|
|
4289
4652
|
content_type=ReasoningSteps.__name__,
|
|
4290
4653
|
),
|
|
4291
4654
|
run_response,
|
|
4655
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
4656
|
+
store_events=self.store_events,
|
|
4292
4657
|
)
|
|
4293
4658
|
|
|
4294
4659
|
# Update RunOutput
|
|
@@ -4313,7 +4678,7 @@ class Agent:
|
|
|
4313
4678
|
model_response_event: Union[ModelResponse, RunOutputEvent, TeamRunOutputEvent],
|
|
4314
4679
|
reasoning_state: Optional[Dict[str, Any]] = None,
|
|
4315
4680
|
parse_structured_output: bool = False,
|
|
4316
|
-
|
|
4681
|
+
stream_events: bool = False,
|
|
4317
4682
|
) -> Iterator[RunOutputEvent]:
|
|
4318
4683
|
if isinstance(model_response_event, tuple(get_args(RunOutputEvent))) or isinstance(
|
|
4319
4684
|
model_response_event, tuple(get_args(TeamRunOutputEvent))
|
|
@@ -4325,7 +4690,12 @@ class Agent:
|
|
|
4325
4690
|
model_response_event.run_id = run_response.run_id # type: ignore
|
|
4326
4691
|
|
|
4327
4692
|
# We just bubble the event up
|
|
4328
|
-
yield
|
|
4693
|
+
yield handle_event( # type: ignore
|
|
4694
|
+
model_response_event, # type: ignore
|
|
4695
|
+
run_response,
|
|
4696
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
4697
|
+
store_events=self.store_events,
|
|
4698
|
+
)
|
|
4329
4699
|
else:
|
|
4330
4700
|
model_response_event = cast(ModelResponse, model_response_event)
|
|
4331
4701
|
# If the model response is an assistant_response, yield a RunOutput
|
|
@@ -4370,13 +4740,15 @@ class Agent:
|
|
|
4370
4740
|
|
|
4371
4741
|
# Only yield if we have content to show
|
|
4372
4742
|
if content_type != "str":
|
|
4373
|
-
yield
|
|
4743
|
+
yield handle_event( # type: ignore
|
|
4374
4744
|
create_run_output_content_event(
|
|
4375
4745
|
from_run_response=run_response,
|
|
4376
4746
|
content=model_response.content,
|
|
4377
4747
|
content_type=content_type,
|
|
4378
4748
|
),
|
|
4379
4749
|
run_response,
|
|
4750
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
4751
|
+
store_events=self.store_events,
|
|
4380
4752
|
)
|
|
4381
4753
|
elif (
|
|
4382
4754
|
model_response_event.content is not None
|
|
@@ -4385,7 +4757,7 @@ class Agent:
|
|
|
4385
4757
|
or model_response_event.citations is not None
|
|
4386
4758
|
or model_response_event.provider_data is not None
|
|
4387
4759
|
):
|
|
4388
|
-
yield
|
|
4760
|
+
yield handle_event( # type: ignore
|
|
4389
4761
|
create_run_output_content_event(
|
|
4390
4762
|
from_run_response=run_response,
|
|
4391
4763
|
content=model_response_event.content,
|
|
@@ -4395,6 +4767,8 @@ class Agent:
|
|
|
4395
4767
|
model_provider_data=model_response_event.provider_data,
|
|
4396
4768
|
),
|
|
4397
4769
|
run_response,
|
|
4770
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
4771
|
+
store_events=self.store_events,
|
|
4398
4772
|
)
|
|
4399
4773
|
|
|
4400
4774
|
# Process audio
|
|
@@ -4449,21 +4823,25 @@ class Agent:
|
|
|
4449
4823
|
)
|
|
4450
4824
|
run_response.created_at = model_response_event.created_at
|
|
4451
4825
|
|
|
4452
|
-
yield
|
|
4826
|
+
yield handle_event( # type: ignore
|
|
4453
4827
|
create_run_output_content_event(
|
|
4454
4828
|
from_run_response=run_response,
|
|
4455
4829
|
response_audio=run_response.response_audio,
|
|
4456
4830
|
),
|
|
4457
4831
|
run_response,
|
|
4832
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
4833
|
+
store_events=self.store_events,
|
|
4458
4834
|
)
|
|
4459
4835
|
|
|
4460
4836
|
if model_response_event.images is not None:
|
|
4461
|
-
yield
|
|
4837
|
+
yield handle_event( # type: ignore
|
|
4462
4838
|
create_run_output_content_event(
|
|
4463
4839
|
from_run_response=run_response,
|
|
4464
4840
|
image=model_response_event.images[-1],
|
|
4465
4841
|
),
|
|
4466
4842
|
run_response,
|
|
4843
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
4844
|
+
store_events=self.store_events,
|
|
4467
4845
|
)
|
|
4468
4846
|
|
|
4469
4847
|
if model_response.images is None:
|
|
@@ -4498,11 +4876,14 @@ class Agent:
|
|
|
4498
4876
|
run_response.tools.extend(tool_executions_list)
|
|
4499
4877
|
|
|
4500
4878
|
# Yield each tool call started event
|
|
4501
|
-
|
|
4502
|
-
|
|
4503
|
-
|
|
4504
|
-
|
|
4505
|
-
|
|
4879
|
+
if stream_events:
|
|
4880
|
+
for tool in tool_executions_list:
|
|
4881
|
+
yield handle_event( # type: ignore
|
|
4882
|
+
create_tool_call_started_event(from_run_response=run_response, tool=tool),
|
|
4883
|
+
run_response,
|
|
4884
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
4885
|
+
store_events=self.store_events,
|
|
4886
|
+
)
|
|
4506
4887
|
|
|
4507
4888
|
# If the model response is a tool_call_completed, update the existing tool call in the run_response
|
|
4508
4889
|
elif model_response_event.event == ModelResponseEvent.tool_call_completed.value:
|
|
@@ -4566,159 +4947,81 @@ class Agent:
|
|
|
4566
4947
|
"reasoning_time_taken"
|
|
4567
4948
|
] + float(tool_call_metrics.duration)
|
|
4568
4949
|
|
|
4569
|
-
|
|
4570
|
-
|
|
4571
|
-
|
|
4572
|
-
|
|
4573
|
-
|
|
4574
|
-
|
|
4575
|
-
|
|
4576
|
-
|
|
4950
|
+
if stream_events:
|
|
4951
|
+
yield handle_event( # type: ignore
|
|
4952
|
+
create_tool_call_completed_event(
|
|
4953
|
+
from_run_response=run_response, tool=tool_call, content=model_response_event.content
|
|
4954
|
+
),
|
|
4955
|
+
run_response,
|
|
4956
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
4957
|
+
store_events=self.store_events,
|
|
4958
|
+
)
|
|
4577
4959
|
|
|
4578
|
-
if
|
|
4960
|
+
if stream_events:
|
|
4579
4961
|
if reasoning_step is not None:
|
|
4580
4962
|
if reasoning_state and not reasoning_state["reasoning_started"]:
|
|
4581
|
-
yield
|
|
4963
|
+
yield handle_event( # type: ignore
|
|
4582
4964
|
create_reasoning_started_event(from_run_response=run_response),
|
|
4583
4965
|
run_response,
|
|
4966
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
4967
|
+
store_events=self.store_events,
|
|
4584
4968
|
)
|
|
4585
4969
|
reasoning_state["reasoning_started"] = True
|
|
4586
4970
|
|
|
4587
|
-
yield
|
|
4971
|
+
yield handle_event( # type: ignore
|
|
4588
4972
|
create_reasoning_step_event(
|
|
4589
4973
|
from_run_response=run_response,
|
|
4590
4974
|
reasoning_step=reasoning_step,
|
|
4591
4975
|
reasoning_content=run_response.reasoning_content or "",
|
|
4592
4976
|
),
|
|
4593
4977
|
run_response,
|
|
4978
|
+
events_to_skip=self.events_to_skip, # type: ignore
|
|
4979
|
+
store_events=self.store_events,
|
|
4594
4980
|
)
|
|
4595
4981
|
|
|
4596
|
-
def
|
|
4982
|
+
def _make_cultural_knowledge(
|
|
4597
4983
|
self,
|
|
4598
|
-
run_response: RunOutput,
|
|
4599
4984
|
run_messages: RunMessages,
|
|
4600
|
-
|
|
4601
|
-
|
|
4602
|
-
|
|
4603
|
-
|
|
4604
|
-
|
|
4605
|
-
|
|
4606
|
-
|
|
4607
|
-
|
|
4608
|
-
user_message_str = (
|
|
4609
|
-
run_messages.user_message.get_content_string() if run_messages.user_message is not None else None
|
|
4610
|
-
)
|
|
4611
|
-
|
|
4612
|
-
# Create user memories
|
|
4613
|
-
if user_message_str is not None and self.memory_manager is not None and not self.enable_agentic_memory:
|
|
4614
|
-
log_debug("Creating user memories.")
|
|
4615
|
-
futures.append(
|
|
4616
|
-
executor.submit(
|
|
4617
|
-
self.memory_manager.create_user_memories,
|
|
4618
|
-
message=user_message_str,
|
|
4619
|
-
user_id=user_id,
|
|
4620
|
-
agent_id=self.id,
|
|
4621
|
-
)
|
|
4622
|
-
)
|
|
4623
|
-
|
|
4624
|
-
# Parse messages if provided
|
|
4625
|
-
if (
|
|
4626
|
-
self.enable_user_memories
|
|
4627
|
-
and run_messages.extra_messages is not None
|
|
4628
|
-
and len(run_messages.extra_messages) > 0
|
|
4629
|
-
):
|
|
4630
|
-
parsed_messages = []
|
|
4631
|
-
for _im in run_messages.extra_messages:
|
|
4632
|
-
if isinstance(_im, Message):
|
|
4633
|
-
parsed_messages.append(_im)
|
|
4634
|
-
elif isinstance(_im, dict):
|
|
4635
|
-
try:
|
|
4636
|
-
parsed_messages.append(Message(**_im))
|
|
4637
|
-
except Exception as e:
|
|
4638
|
-
log_warning(f"Failed to validate message during memory update: {e}")
|
|
4639
|
-
else:
|
|
4640
|
-
log_warning(f"Unsupported message type: {type(_im)}")
|
|
4641
|
-
continue
|
|
4642
|
-
|
|
4643
|
-
if len(parsed_messages) > 0 and self.memory_manager is not None:
|
|
4644
|
-
futures.append(
|
|
4645
|
-
executor.submit(
|
|
4646
|
-
self.memory_manager.create_user_memories,
|
|
4647
|
-
messages=parsed_messages,
|
|
4648
|
-
user_id=user_id,
|
|
4649
|
-
agent_id=self.id,
|
|
4650
|
-
)
|
|
4651
|
-
)
|
|
4652
|
-
else:
|
|
4653
|
-
log_warning("Unable to add messages to memory")
|
|
4654
|
-
|
|
4655
|
-
# Create cultural knowledge
|
|
4656
|
-
if user_message_str is not None and self.culture_manager is not None and self.update_cultural_knowledge:
|
|
4657
|
-
log_debug("Creating cultural knowledge.")
|
|
4658
|
-
futures.append(
|
|
4659
|
-
executor.submit(
|
|
4660
|
-
self.culture_manager.create_cultural_knowledge,
|
|
4661
|
-
message=user_message_str,
|
|
4662
|
-
)
|
|
4663
|
-
)
|
|
4664
|
-
|
|
4665
|
-
# Create session summary
|
|
4666
|
-
if self.session_summary_manager is not None:
|
|
4667
|
-
log_debug("Creating session summary.")
|
|
4668
|
-
futures.append(
|
|
4669
|
-
executor.submit(
|
|
4670
|
-
self.session_summary_manager.create_session_summary, # type: ignore
|
|
4671
|
-
session=session,
|
|
4672
|
-
)
|
|
4673
|
-
)
|
|
4674
|
-
|
|
4675
|
-
if futures:
|
|
4676
|
-
if self.stream_intermediate_steps:
|
|
4677
|
-
yield self._handle_event(
|
|
4678
|
-
create_memory_update_started_event(from_run_response=run_response),
|
|
4679
|
-
run_response,
|
|
4680
|
-
)
|
|
4681
|
-
|
|
4682
|
-
# Wait for all operations to complete and handle any errors
|
|
4683
|
-
for future in as_completed(futures):
|
|
4684
|
-
try:
|
|
4685
|
-
future.result()
|
|
4686
|
-
except Exception as e:
|
|
4687
|
-
log_warning(f"Error in memory/summary operation: {str(e)}")
|
|
4985
|
+
):
|
|
4986
|
+
if (
|
|
4987
|
+
run_messages.user_message is not None
|
|
4988
|
+
and self.culture_manager is not None
|
|
4989
|
+
and self.update_cultural_knowledge
|
|
4990
|
+
):
|
|
4991
|
+
log_debug("Creating cultural knowledge.")
|
|
4992
|
+
self.culture_manager.create_cultural_knowledge(message=run_messages.user_message.get_content_string())
|
|
4688
4993
|
|
|
4689
|
-
|
|
4690
|
-
|
|
4691
|
-
|
|
4692
|
-
|
|
4693
|
-
|
|
4994
|
+
async def _acreate_cultural_knowledge(
|
|
4995
|
+
self,
|
|
4996
|
+
run_messages: RunMessages,
|
|
4997
|
+
):
|
|
4998
|
+
if (
|
|
4999
|
+
run_messages.user_message is not None
|
|
5000
|
+
and self.culture_manager is not None
|
|
5001
|
+
and self.update_cultural_knowledge
|
|
5002
|
+
):
|
|
5003
|
+
log_debug("Creating cultural knowledge.")
|
|
5004
|
+
await self.culture_manager.acreate_cultural_knowledge(
|
|
5005
|
+
message=run_messages.user_message.get_content_string()
|
|
5006
|
+
)
|
|
4694
5007
|
|
|
4695
|
-
|
|
5008
|
+
def _make_memories(
|
|
4696
5009
|
self,
|
|
4697
|
-
run_response: RunOutput,
|
|
4698
5010
|
run_messages: RunMessages,
|
|
4699
|
-
session: AgentSession,
|
|
4700
5011
|
user_id: Optional[str] = None,
|
|
4701
|
-
)
|
|
4702
|
-
|
|
4703
|
-
|
|
4704
|
-
|
|
4705
|
-
if
|
|
5012
|
+
):
|
|
5013
|
+
user_message_str = (
|
|
5014
|
+
run_messages.user_message.get_content_string() if run_messages.user_message is not None else None
|
|
5015
|
+
)
|
|
5016
|
+
if user_message_str is not None and user_message_str.strip() != "" and self.memory_manager is not None:
|
|
4706
5017
|
log_debug("Creating user memories.")
|
|
4707
|
-
|
|
4708
|
-
|
|
4709
|
-
|
|
4710
|
-
|
|
4711
|
-
user_id=user_id,
|
|
4712
|
-
agent_id=self.id,
|
|
4713
|
-
)
|
|
5018
|
+
self.memory_manager.create_user_memories( # type: ignore
|
|
5019
|
+
message=user_message_str,
|
|
5020
|
+
user_id=user_id,
|
|
5021
|
+
agent_id=self.id,
|
|
4714
5022
|
)
|
|
4715
5023
|
|
|
4716
|
-
|
|
4717
|
-
if (
|
|
4718
|
-
self.memory_manager is not None
|
|
4719
|
-
and run_messages.extra_messages is not None
|
|
4720
|
-
and len(run_messages.extra_messages) > 0
|
|
4721
|
-
):
|
|
5024
|
+
if run_messages.extra_messages is not None and len(run_messages.extra_messages) > 0:
|
|
4722
5025
|
parsed_messages = []
|
|
4723
5026
|
for _im in run_messages.extra_messages:
|
|
4724
5027
|
if isinstance(_im, Message):
|
|
@@ -4732,54 +5035,59 @@ class Agent:
|
|
|
4732
5035
|
log_warning(f"Unsupported message type: {type(_im)}")
|
|
4733
5036
|
continue
|
|
4734
5037
|
|
|
4735
|
-
|
|
4736
|
-
|
|
4737
|
-
|
|
4738
|
-
|
|
4739
|
-
|
|
4740
|
-
|
|
5038
|
+
# Filter out messages with empty content before passing to memory manager
|
|
5039
|
+
non_empty_messages = [
|
|
5040
|
+
msg
|
|
5041
|
+
for msg in parsed_messages
|
|
5042
|
+
if msg.content and (not isinstance(msg.content, str) or msg.content.strip() != "")
|
|
5043
|
+
]
|
|
5044
|
+
if len(non_empty_messages) > 0 and self.memory_manager is not None:
|
|
5045
|
+
self.memory_manager.create_user_memories(messages=non_empty_messages, user_id=user_id, agent_id=self.id) # type: ignore
|
|
4741
5046
|
else:
|
|
4742
5047
|
log_warning("Unable to add messages to memory")
|
|
4743
5048
|
|
|
4744
|
-
|
|
4745
|
-
|
|
4746
|
-
|
|
4747
|
-
|
|
4748
|
-
|
|
4749
|
-
|
|
4750
|
-
|
|
4751
|
-
|
|
4752
|
-
|
|
4753
|
-
|
|
4754
|
-
|
|
4755
|
-
|
|
4756
|
-
|
|
4757
|
-
|
|
4758
|
-
log_debug("Creating session summary.")
|
|
4759
|
-
tasks.append(
|
|
4760
|
-
self.session_summary_manager.acreate_session_summary(
|
|
4761
|
-
session=session,
|
|
4762
|
-
)
|
|
5049
|
+
async def _amake_memories(
|
|
5050
|
+
self,
|
|
5051
|
+
run_messages: RunMessages,
|
|
5052
|
+
user_id: Optional[str] = None,
|
|
5053
|
+
):
|
|
5054
|
+
user_message_str = (
|
|
5055
|
+
run_messages.user_message.get_content_string() if run_messages.user_message is not None else None
|
|
5056
|
+
)
|
|
5057
|
+
if user_message_str is not None and user_message_str.strip() != "" and self.memory_manager is not None:
|
|
5058
|
+
log_debug("Creating user memories.")
|
|
5059
|
+
await self.memory_manager.acreate_user_memories( # type: ignore
|
|
5060
|
+
message=user_message_str,
|
|
5061
|
+
user_id=user_id,
|
|
5062
|
+
agent_id=self.id,
|
|
4763
5063
|
)
|
|
4764
5064
|
|
|
4765
|
-
if
|
|
4766
|
-
|
|
4767
|
-
|
|
4768
|
-
|
|
4769
|
-
|
|
4770
|
-
)
|
|
4771
|
-
|
|
4772
|
-
|
|
4773
|
-
|
|
4774
|
-
|
|
4775
|
-
|
|
4776
|
-
|
|
5065
|
+
if run_messages.extra_messages is not None and len(run_messages.extra_messages) > 0:
|
|
5066
|
+
parsed_messages = []
|
|
5067
|
+
for _im in run_messages.extra_messages:
|
|
5068
|
+
if isinstance(_im, Message):
|
|
5069
|
+
parsed_messages.append(_im)
|
|
5070
|
+
elif isinstance(_im, dict):
|
|
5071
|
+
try:
|
|
5072
|
+
parsed_messages.append(Message(**_im))
|
|
5073
|
+
except Exception as e:
|
|
5074
|
+
log_warning(f"Failed to validate message during memory update: {e}")
|
|
5075
|
+
else:
|
|
5076
|
+
log_warning(f"Unsupported message type: {type(_im)}")
|
|
5077
|
+
continue
|
|
4777
5078
|
|
|
4778
|
-
|
|
4779
|
-
|
|
4780
|
-
|
|
4781
|
-
|
|
5079
|
+
# Filter out messages with empty content before passing to memory manager
|
|
5080
|
+
non_empty_messages = [
|
|
5081
|
+
msg
|
|
5082
|
+
for msg in parsed_messages
|
|
5083
|
+
if msg.content and (not isinstance(msg.content, str) or msg.content.strip() != "")
|
|
5084
|
+
]
|
|
5085
|
+
if len(non_empty_messages) > 0 and self.memory_manager is not None:
|
|
5086
|
+
await self.memory_manager.acreate_user_memories( # type: ignore
|
|
5087
|
+
messages=non_empty_messages, user_id=user_id, agent_id=self.id
|
|
4782
5088
|
)
|
|
5089
|
+
else:
|
|
5090
|
+
log_warning("Unable to add messages to memory")
|
|
4783
5091
|
|
|
4784
5092
|
def _raise_if_async_tools(self) -> None:
|
|
4785
5093
|
"""Raise an exception if any tools contain async functions"""
|
|
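Note: the memory/summary work is split out of the old executor-based method into `_make_memories`, `_amake_memories`, `_make_cultural_knowledge`, and `_acreate_cultural_knowledge`, and messages with empty content are now filtered out before they reach the memory manager. The filter itself is small enough to show in isolation; `Message` is sketched here as a simple dataclass rather than the agno model:

```python
from dataclasses import dataclass
from typing import Any, List

@dataclass
class Message:
    role: str
    content: Any = None

def non_empty_messages(messages: List[Message]) -> List[Message]:
    """Keep messages whose content is truthy and, for strings, not just whitespace."""
    return [
        msg
        for msg in messages
        if msg.content and (not isinstance(msg.content, str) or msg.content.strip() != "")
    ]

if __name__ == "__main__":
    msgs = [Message("user", "hello"), Message("user", "   "),
            Message("user", None), Message("user", {"parts": [1]})]
    print([m.content for m in non_empty_messages(msgs)])  # ['hello', {'parts': [1]}]
```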
@@ -4878,7 +5186,7 @@ class Agent:
|
|
|
4878
5186
|
self._rebuild_tools = True
|
|
4879
5187
|
|
|
4880
5188
|
if self.enable_agentic_state:
|
|
4881
|
-
agent_tools.append(self.
|
|
5189
|
+
agent_tools.append(Function(name="update_session_state", entrypoint=self._update_session_state_tool))
|
|
4882
5190
|
|
|
4883
5191
|
# Add tools for accessing knowledge
|
|
4884
5192
|
if self.knowledge is not None or self.knowledge_retriever is not None:
|
|
@@ -4976,7 +5284,7 @@ class Agent:
|
|
|
4976
5284
|
self._rebuild_tools = True
|
|
4977
5285
|
|
|
4978
5286
|
if self.enable_agentic_state:
|
|
4979
|
-
agent_tools.append(self.
|
|
5287
|
+
agent_tools.append(Function(name="update_session_state", entrypoint=self._update_session_state_tool))
|
|
4980
5288
|
|
|
4981
5289
|
# Add tools for accessing knowledge
|
|
4982
5290
|
if self.knowledge is not None or self.knowledge_retriever is not None:
|
|
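Note: with `enable_agentic_state`, the agent now registers the session-state tool as an explicit `Function(name="update_session_state", entrypoint=self._update_session_state_tool)`, so the exposed tool name is fixed to `update_session_state` rather than depending on how the bound method is named. A toy sketch of that registration pattern (this `Function` and `ToyAgent` are illustrative, not the agno classes):

```python
from dataclasses import dataclass
from typing import Any, Callable, Dict, List

@dataclass
class Function:
    name: str
    entrypoint: Callable[..., Any]

class ToyAgent:
    def __init__(self, enable_agentic_state: bool = False) -> None:
        self.enable_agentic_state = enable_agentic_state
        self.session_state: Dict[str, Any] = {}

    def _update_session_state_tool(self, updates: Dict[str, Any]) -> Dict[str, Any]:
        # Entrypoint the model would call; the registered tool name stays "update_session_state".
        self.session_state.update(updates)
        return self.session_state

    def tools(self) -> List[Function]:
        registered: List[Function] = []
        if self.enable_agentic_state:
            registered.append(Function(name="update_session_state",
                                       entrypoint=self._update_session_state_tool))
        return registered

if __name__ == "__main__":
    agent = ToyAgent(enable_agentic_state=True)
    tool = agent.tools()[0]
    print(tool.name, tool.entrypoint({"city": "Paris"}))
```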
@@ -5014,137 +5322,6 @@ class Agent:
|
|
|
5014
5322
|
|
|
5015
5323
|
return agent_tools
|
|
5016
5324
|
|
|
5017
|
-
def _collect_joint_images(
|
|
5018
|
-
self,
|
|
5019
|
-
run_input: Optional[RunInput] = None,
|
|
5020
|
-
session: Optional[AgentSession] = None,
|
|
5021
|
-
) -> Optional[Sequence[Image]]:
|
|
5022
|
-
"""Collect images from input, session history, and current run response."""
|
|
5023
|
-
joint_images: List[Image] = []
|
|
5024
|
-
|
|
5025
|
-
# 1. Add images from current input
|
|
5026
|
-
if run_input and run_input.images:
|
|
5027
|
-
joint_images.extend(run_input.images)
|
|
5028
|
-
log_debug(f"Added {len(run_input.images)} input images to joint list")
|
|
5029
|
-
|
|
5030
|
-
# 2. Add images from session history (from both input and generated sources)
|
|
5031
|
-
try:
|
|
5032
|
-
if session and session.runs:
|
|
5033
|
-
for historical_run in session.runs:
|
|
5034
|
-
# Add generated images from previous runs
|
|
5035
|
-
if historical_run.images:
|
|
5036
|
-
joint_images.extend(historical_run.images)
|
|
5037
|
-
log_debug(
|
|
5038
|
-
f"Added {len(historical_run.images)} generated images from historical run {historical_run.run_id}"
|
|
5039
|
-
)
|
|
5040
|
-
|
|
5041
|
-
# Add input images from previous runs
|
|
5042
|
-
if historical_run.input and historical_run.input.images:
|
|
5043
|
-
joint_images.extend(historical_run.input.images)
|
|
5044
|
-
log_debug(
|
|
5045
|
-
f"Added {len(historical_run.input.images)} input images from historical run {historical_run.run_id}"
|
|
5046
|
-
)
|
|
5047
|
-
except Exception as e:
|
|
5048
|
-
log_debug(f"Could not access session history for images: {e}")
|
|
5049
|
-
|
|
5050
|
-
if joint_images:
|
|
5051
|
-
log_debug(f"Images Available to Model: {len(joint_images)} images")
|
|
5052
|
-
return joint_images if joint_images else None
|
|
5053
|
-
|
|
5054
|
-
def _collect_joint_videos(
|
|
5055
|
-
self,
|
|
5056
|
-
run_input: Optional[RunInput] = None,
|
|
5057
|
-
session: Optional[AgentSession] = None,
|
|
5058
|
-
) -> Optional[Sequence[Video]]:
|
|
5059
|
-
"""Collect videos from input, session history, and current run response."""
|
|
5060
|
-
joint_videos: List[Video] = []
|
|
5061
|
-
|
|
5062
|
-
# 1. Add videos from current input
|
|
5063
|
-
if run_input and run_input.videos:
|
|
5064
|
-
joint_videos.extend(run_input.videos)
|
|
5065
|
-
log_debug(f"Added {len(run_input.videos)} input videos to joint list")
|
|
5066
|
-
|
|
5067
|
-
# 2. Add videos from session history (from both input and generated sources)
|
|
5068
|
-
try:
|
|
5069
|
-
if session and session.runs:
|
|
5070
|
-
for historical_run in session.runs:
|
|
5071
|
-
# Add generated videos from previous runs
|
|
5072
|
-
if historical_run.videos:
|
|
5073
|
-
joint_videos.extend(historical_run.videos)
|
|
5074
|
-
log_debug(
|
|
5075
|
-
f"Added {len(historical_run.videos)} generated videos from historical run {historical_run.run_id}"
|
|
5076
|
-
)
|
|
5077
|
-
|
|
5078
|
-
# Add input videos from previous runs
|
|
5079
|
-
if historical_run.input and historical_run.input.videos:
|
|
5080
|
-
joint_videos.extend(historical_run.input.videos)
|
|
5081
|
-
log_debug(
|
|
5082
|
-
f"Added {len(historical_run.input.videos)} input videos from historical run {historical_run.run_id}"
|
|
5083
|
-
)
|
|
5084
|
-
except Exception as e:
|
|
5085
|
-
log_debug(f"Could not access session history for videos: {e}")
|
|
5086
|
-
|
|
5087
|
-
if joint_videos:
|
|
5088
|
-
log_debug(f"Videos Available to Model: {len(joint_videos)} videos")
|
|
5089
|
-
return joint_videos if joint_videos else None
|
|
5090
|
-
|
|
5091
|
-
def _collect_joint_audios(
|
|
5092
|
-
self,
|
|
5093
|
-
run_input: Optional[RunInput] = None,
|
|
5094
|
-
session: Optional[AgentSession] = None,
|
|
5095
|
-
) -> Optional[Sequence[Audio]]:
|
|
5096
|
-
"""Collect audios from input, session history, and current run response."""
|
|
5097
|
-
joint_audios: List[Audio] = []
|
|
5098
|
-
|
|
5099
|
-
# 1. Add audios from current input
|
|
5100
|
-
if run_input and run_input.audios:
|
|
5101
|
-
joint_audios.extend(run_input.audios)
|
|
5102
|
-
log_debug(f"Added {len(run_input.audios)} input audios to joint list")
|
|
5103
|
-
|
|
5104
|
-
# 2. Add audios from session history (from both input and generated sources)
|
|
5105
|
-
try:
|
|
5106
|
-
if session and session.runs:
|
|
5107
|
-
for historical_run in session.runs:
|
|
5108
|
-
# Add generated audios from previous runs
|
|
5109
|
-
if historical_run.audio:
|
|
5110
|
-
joint_audios.extend(historical_run.audio)
|
|
5111
|
-
log_debug(
|
|
5112
|
-
f"Added {len(historical_run.audio)} generated audios from historical run {historical_run.run_id}"
|
|
5113
|
-
)
|
|
5114
|
-
|
|
5115
|
-
# Add input audios from previous runs
|
|
5116
|
-
if historical_run.input and historical_run.input.audios:
|
|
5117
|
-
joint_audios.extend(historical_run.input.audios)
|
|
5118
|
-
log_debug(
|
|
5119
|
-
f"Added {len(historical_run.input.audios)} input audios from historical run {historical_run.run_id}"
|
|
5120
|
-
)
|
|
5121
|
-
except Exception as e:
|
|
5122
|
-
log_debug(f"Could not access session history for audios: {e}")
|
|
5123
|
-
|
|
5124
|
-
if joint_audios:
|
|
5125
|
-
log_debug(f"Audios Available to Model: {len(joint_audios)} audios")
|
|
5126
|
-
return joint_audios if joint_audios else None
|
|
5127
|
-
|
|
5128
|
-
def _collect_joint_files(
|
|
5129
|
-
self,
|
|
5130
|
-
run_input: Optional[RunInput] = None,
|
|
5131
|
-
) -> Optional[Sequence[File]]:
|
|
5132
|
-
"""Collect files from input and session history."""
|
|
5133
|
-
from agno.utils.log import log_debug
|
|
5134
|
-
|
|
5135
|
-
joint_files: List[File] = []
|
|
5136
|
-
|
|
5137
|
-
# 1. Add files from current input
|
|
5138
|
-
if run_input and run_input.files:
|
|
5139
|
-
joint_files.extend(run_input.files)
|
|
5140
|
-
|
|
5141
|
-
# TODO: Files aren't stored in session history yet and dont have a FileArtifact
|
|
5142
|
-
|
|
5143
|
-
if joint_files:
|
|
5144
|
-
log_debug(f"Files Available to Model: {len(joint_files)} files")
|
|
5145
|
-
|
|
5146
|
-
return joint_files if joint_files else None
|
|
5147
|
-
|
|
5148
5325
|
def _determine_tools_for_model(
|
|
5149
5326
|
self,
|
|
5150
5327
|
model: Model,
|
|
@@ -5254,10 +5431,10 @@ class Agent:
|
|
|
5254
5431
|
)
|
|
5255
5432
|
|
|
5256
5433
|
# Only collect media if functions actually need them
|
|
5257
|
-
joint_images =
|
|
5258
|
-
joint_files =
|
|
5259
|
-
joint_audios =
|
|
5260
|
-
joint_videos =
|
|
5434
|
+
joint_images = collect_joint_images(run_response.input, session) if needs_media else None
|
|
5435
|
+
joint_files = collect_joint_files(run_response.input) if needs_media else None
|
|
5436
|
+
joint_audios = collect_joint_audios(run_response.input, session) if needs_media else None
|
|
5437
|
+
joint_videos = collect_joint_videos(run_response.input, session) if needs_media else None
|
|
5261
5438
|
|
|
5262
5439
|
for func in self._functions_for_model.values():
|
|
5263
5440
|
func._session_state = session_state
|
|
@@ -5376,10 +5553,10 @@ class Agent:
|
|
|
5376
5553
|
)
|
|
5377
5554
|
|
|
5378
5555
|
# Only collect media if functions actually need them
|
|
5379
|
-
joint_images =
|
|
5380
|
-
joint_files =
|
|
5381
|
-
joint_audios =
|
|
5382
|
-
joint_videos =
|
|
5556
|
+
joint_images = collect_joint_images(run_response.input, session) if needs_media else None
|
|
5557
|
+
joint_files = collect_joint_files(run_response.input) if needs_media else None
|
|
5558
|
+
joint_audios = collect_joint_audios(run_response.input, session) if needs_media else None
|
|
5559
|
+
joint_videos = collect_joint_videos(run_response.input, session) if needs_media else None
|
|
5383
5560
|
|
|
5384
5561
|
for func in self._functions_for_model.values():
|
|
5385
5562
|
func._session_state = session_state
|
|
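Note: the `_collect_joint_images`/`_collect_joint_videos`/`_collect_joint_audios`/`_collect_joint_files` methods removed in the previous hunk are replaced by module-level `collect_joint_*` helpers (imported from `agno.utils.agent` in the import hunk), and they are only called when the selected functions actually need media. A sketch of the lazy-collection guard, with a dummy collector standing in for the real one:

```python
from typing import List, Optional, Sequence

def collect_joint_images(run_input: Optional[dict], session: Optional[dict]) -> Optional[Sequence[str]]:
    """Stand-in collector: merge input images with images from earlier runs in the session."""
    images: List[str] = []
    if run_input and run_input.get("images"):
        images.extend(run_input["images"])
    for run in (session or {}).get("runs", []):
        images.extend(run.get("images", []))
    return images or None

def images_for_model(needs_media: bool, run_input: Optional[dict], session: Optional[dict]) -> Optional[Sequence[str]]:
    # Media is only gathered when at least one selected tool declares it needs it.
    return collect_joint_images(run_input, session) if needs_media else None

if __name__ == "__main__":
    session = {"runs": [{"images": ["chart.png"]}]}
    print(images_for_model(True, {"images": ["photo.jpg"]}, session))   # ['photo.jpg', 'chart.png']
    print(images_for_model(False, {"images": ["photo.jpg"]}, session))  # None
```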
@@ -5488,17 +5665,21 @@ class Agent:
|
|
|
5488
5665
|
return agent_data
|
|
5489
5666
|
|
|
5490
5667
|
# -*- Session Database Functions
|
|
5491
|
-
def _read_session(
|
|
5668
|
+
def _read_session(
|
|
5669
|
+
self, session_id: str, session_type: SessionType = SessionType.AGENT
|
|
5670
|
+
) -> Optional[Union[AgentSession, TeamSession, WorkflowSession]]:
|
|
5492
5671
|
"""Get a Session from the database."""
|
|
5493
5672
|
try:
|
|
5494
5673
|
if not self.db:
|
|
5495
5674
|
raise ValueError("Db not initialized")
|
|
5496
|
-
return self.db.get_session(session_id=session_id, session_type=
|
|
5675
|
+
return self.db.get_session(session_id=session_id, session_type=session_type) # type: ignore
|
|
5497
5676
|
except Exception as e:
|
|
5498
5677
|
log_warning(f"Error getting session from db: {e}")
|
|
5499
5678
|
return None
|
|
5500
5679
|
|
|
5501
|
-
async def _aread_session(
|
|
5680
|
+
async def _aread_session(
|
|
5681
|
+
self, session_id: str, session_type: SessionType = SessionType.AGENT
|
|
5682
|
+
) -> Optional[Union[AgentSession, TeamSession, WorkflowSession]]:
|
|
5502
5683
|
"""Get a Session from the database."""
|
|
5503
5684
|
try:
|
|
5504
5685
|
if not self.db:
|
|
@@ -5599,12 +5780,17 @@ class Agent:
|
|
|
5599
5780
|
if agent_session is None:
|
|
5600
5781
|
# Creating new session if none found
|
|
5601
5782
|
log_debug(f"Creating new AgentSession: {session_id}")
|
|
5783
|
+
session_data = {}
|
|
5784
|
+
if self.session_state is not None:
|
|
5785
|
+
from copy import deepcopy
|
|
5786
|
+
|
|
5787
|
+
session_data["session_state"] = deepcopy(self.session_state)
|
|
5602
5788
|
agent_session = AgentSession(
|
|
5603
5789
|
session_id=session_id,
|
|
5604
5790
|
agent_id=self.id,
|
|
5605
5791
|
user_id=user_id,
|
|
5606
5792
|
agent_data=self._get_agent_data(),
|
|
5607
|
-
session_data=
|
|
5793
|
+
session_data=session_data,
|
|
5608
5794
|
metadata=self.metadata,
|
|
5609
5795
|
created_at=int(time()),
|
|
5610
5796
|
)
|
|
@@ -5629,18 +5815,25 @@ class Agent:
|
|
|
5629
5815
|
agent_session = None
|
|
5630
5816
|
if self.db is not None and self.team_id is None and self.workflow_id is None:
|
|
5631
5817
|
log_debug(f"Reading AgentSession: {session_id}")
|
|
5632
|
-
|
|
5633
|
-
|
|
5818
|
+
if self._has_async_db():
|
|
5819
|
+
agent_session = cast(AgentSession, await self._aread_session(session_id=session_id))
|
|
5820
|
+
else:
|
|
5821
|
+
agent_session = cast(AgentSession, self._read_session(session_id=session_id))
|
|
5634
5822
|
|
|
5635
5823
|
if agent_session is None:
|
|
5636
5824
|
# Creating new session if none found
|
|
5637
5825
|
log_debug(f"Creating new AgentSession: {session_id}")
|
|
5826
|
+
session_data = {}
|
|
5827
|
+
if self.session_state is not None:
|
|
5828
|
+
from copy import deepcopy
|
|
5829
|
+
|
|
5830
|
+
session_data["session_state"] = deepcopy(self.session_state)
|
|
5638
5831
|
agent_session = AgentSession(
|
|
5639
5832
|
session_id=session_id,
|
|
5640
5833
|
agent_id=self.id,
|
|
5641
5834
|
user_id=user_id,
|
|
5642
5835
|
agent_data=self._get_agent_data(),
|
|
5643
|
-
session_data=
|
|
5836
|
+
session_data=session_data,
|
|
5644
5837
|
metadata=self.metadata,
|
|
5645
5838
|
created_at=int(time()),
|
|
5646
5839
|
)
|
|
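Note: newly created sessions now seed `session_data["session_state"]` with a deep copy of the agent's default `session_state`, so per-session mutations cannot leak back into the agent-level default. A minimal illustration of why the `deepcopy` matters:

```python
from copy import deepcopy
from typing import Any, Dict

default_state: Dict[str, Any] = {"shopping_list": []}

def new_session_data(agent_session_state: Dict[str, Any]) -> Dict[str, Any]:
    # Mirrors the diff: seed the session's state from the agent default, by value not by reference.
    return {"session_state": deepcopy(agent_session_state)}

if __name__ == "__main__":
    session_a = new_session_data(default_state)
    session_a["session_state"]["shopping_list"].append("milk")
    print(default_state)               # {'shopping_list': []} -- untouched thanks to deepcopy
    print(session_a["session_state"])  # {'shopping_list': ['milk']}
```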
@@ -5666,13 +5859,13 @@ class Agent:
|
|
|
5666
5859
|
log_warning(f"RunOutput {run_id} not found in AgentSession {self._agent_session.session_id}")
|
|
5667
5860
|
return None
|
|
5668
5861
|
else:
|
|
5669
|
-
|
|
5670
|
-
if
|
|
5671
|
-
run_response =
|
|
5862
|
+
session = self.get_session(session_id=session_id)
|
|
5863
|
+
if session is not None:
|
|
5864
|
+
run_response = session.get_run(run_id=run_id)
|
|
5672
5865
|
if run_response is not None:
|
|
5673
5866
|
return run_response
|
|
5674
5867
|
else:
|
|
5675
|
-
log_warning(f"RunOutput {run_id} not found in
|
|
5868
|
+
log_warning(f"RunOutput {run_id} not found in Session {session_id}")
|
|
5676
5869
|
return None
|
|
5677
5870
|
|
|
5678
5871
|
def get_last_run_output(self, session_id: Optional[str] = None) -> Optional[RunOutput]:
|
|
@@ -5690,17 +5883,17 @@ class Agent:
|
|
|
5690
5883
|
and self._agent_session.runs is not None
|
|
5691
5884
|
and len(self._agent_session.runs) > 0
|
|
5692
5885
|
):
|
|
5693
|
-
|
|
5694
|
-
|
|
5695
|
-
|
|
5886
|
+
for run_output in reversed(self._agent_session.runs):
|
|
5887
|
+
if hasattr(run_output, "agent_id") and run_output.agent_id == self.id:
|
|
5888
|
+
return run_output
|
|
5696
5889
|
else:
|
|
5697
|
-
|
|
5698
|
-
if
|
|
5699
|
-
|
|
5700
|
-
|
|
5701
|
-
|
|
5890
|
+
session = self.get_session(session_id=session_id)
|
|
5891
|
+
if session is not None and session.runs is not None and len(session.runs) > 0:
|
|
5892
|
+
for run_output in reversed(session.runs):
|
|
5893
|
+
if hasattr(run_output, "agent_id") and run_output.agent_id == self.id:
|
|
5894
|
+
return run_output
|
|
5702
5895
|
else:
|
|
5703
|
-
log_warning(f"No run responses found in
|
|
5896
|
+
log_warning(f"No run responses found in Session {session_id}")
|
|
5704
5897
|
return None
|
|
5705
5898
|
|
|
5706
5899
|
def cancel_run(self, run_id: str) -> bool:
|
|
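Note: `get_run_output` now resolves runs through the loaded session, and `get_last_run_output` walks the session's runs in reverse and returns the newest one attributed to this agent, since a shared (team or workflow) session can contain runs from several components. The lookup, sketched with plain objects:

```python
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class RunOutput:
    run_id: str
    agent_id: Optional[str] = None

def last_run_for_agent(runs: List[RunOutput], agent_id: str) -> Optional[RunOutput]:
    """Newest run in the session that belongs to the given agent, if any."""
    for run in reversed(runs):
        if getattr(run, "agent_id", None) == agent_id:
            return run
    return None

if __name__ == "__main__":
    runs = [RunOutput("r1", "agent-a"), RunOutput("r2", "agent-b"), RunOutput("r3", "agent-a")]
    print(last_run_for_agent(runs, "agent-a").run_id)  # r3
```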
@@ -5738,7 +5931,65 @@ class Agent:
|
|
|
5738
5931
|
|
|
5739
5932
|
# Load and return the session from the database
|
|
5740
5933
|
if self.db is not None:
|
|
5741
|
-
|
|
5934
|
+
loaded_session = None
|
|
5935
|
+
|
|
5936
|
+
# We have a standalone agent, so we are loading an AgentSession
|
|
5937
|
+
if self.team_id is None and self.workflow_id is None:
|
|
5938
|
+
loaded_session = cast(
|
|
5939
|
+
AgentSession,
|
|
5940
|
+
self._read_session(session_id=session_id_to_load, session_type=SessionType.AGENT), # type: ignore
|
|
5941
|
+
)
|
|
5942
|
+
|
|
5943
|
+
# We have a team member agent, so we are loading a TeamSession
|
|
5944
|
+
if loaded_session is None and self.team_id is not None:
|
|
5945
|
+
# Load session for team member agents
|
|
5946
|
+
loaded_session = cast(
|
|
5947
|
+
TeamSession,
|
|
5948
|
+
self._read_session(session_id=session_id_to_load, session_type=SessionType.TEAM), # type: ignore
|
|
5949
|
+
)
|
|
5950
|
+
|
|
5951
|
+
# We have a workflow member agent, so we are loading a WorkflowSession
|
|
5952
|
+
if loaded_session is None and self.workflow_id is not None:
|
|
5953
|
+
# Load session for workflow memberagents
|
|
5954
|
+
loaded_session = cast(
|
|
5955
|
+
WorkflowSession,
|
|
5956
|
+
self._read_session(session_id=session_id_to_load, session_type=SessionType.WORKFLOW), # type: ignore
|
|
5957
|
+
)
|
|
5958
|
+
|
|
5959
|
+
# Cache the session if relevant
|
|
5960
|
+
if loaded_session is not None and self.cache_session:
|
|
5961
|
+
self._agent_session = loaded_session
|
|
5962
|
+
|
|
5963
|
+
return loaded_session
|
|
5964
|
+
|
|
5965
|
+
log_debug(f"Session {session_id_to_load} not found in db")
|
|
5966
|
+
return None
|
|
5967
|
+
|
|
5968
|
+
async def aget_session(
|
|
5969
|
+
self,
|
|
5970
|
+
session_id: Optional[str] = None,
|
|
5971
|
+
) -> Optional[AgentSession]:
|
|
5972
|
+
"""Load an AgentSession from database or cache.
|
|
5973
|
+
|
|
5974
|
+
Args:
|
|
5975
|
+
session_id: The session_id to load from storage.
|
|
5976
|
+
|
|
5977
|
+
Returns:
|
|
5978
|
+
AgentSession: The AgentSession loaded from the database/cache or None if not found.
|
|
5979
|
+
"""
|
|
5980
|
+
if not session_id and not self.session_id:
|
|
5981
|
+
raise Exception("No session_id provided")
|
|
5982
|
+
|
|
5983
|
+
session_id_to_load = session_id or self.session_id
|
|
5984
|
+
|
|
5985
|
+
# If there is a cached session, return it
|
|
5986
|
+
if self.cache_session and hasattr(self, "_agent_session") and self._agent_session is not None:
|
|
5987
|
+
if self._agent_session.session_id == session_id_to_load:
|
|
5988
|
+
return self._agent_session
|
|
5989
|
+
|
|
5990
|
+
# Load and return the session from the database
|
|
5991
|
+
if self.db is not None:
|
|
5992
|
+
agent_session = cast(AgentSession, await self._aread_session(session_id=session_id_to_load)) # type: ignore
|
|
5742
5993
|
|
|
5743
5994
|
# Cache the session if relevant
|
|
5744
5995
|
if agent_session is not None and self.cache_session:
|
|
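Note: `get_session` now chooses which session record to load from the agent's role (standalone agent, team member, or workflow member), caches the result when `cache_session` is set, and gains an async counterpart `aget_session`. A simplified sketch of the type selection, with string placeholders for the `SessionType` enum:

```python
from typing import Optional

def session_type_for_agent(team_id: Optional[str], workflow_id: Optional[str]) -> str:
    """Simplified view of the fallbacks: standalone -> agent, team member -> team, else workflow."""
    if team_id is None and workflow_id is None:
        return "agent"       # standalone agent -> AgentSession
    if team_id is not None:
        return "team"        # team member -> the parent TeamSession
    return "workflow"        # workflow member -> the parent WorkflowSession

if __name__ == "__main__":
    print(session_type_for_agent(None, None))        # agent
    print(session_type_for_agent("team-1", None))    # team
    print(session_type_for_agent(None, "wf-1"))      # workflow
```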
@@ -5787,8 +6038,10 @@ class Agent:
|
|
|
5787
6038
|
session.session_data["session_state"].pop("current_session_id", None)
|
|
5788
6039
|
session.session_data["session_state"].pop("current_user_id", None)
|
|
5789
6040
|
session.session_data["session_state"].pop("current_run_id", None)
|
|
5790
|
-
|
|
5791
|
-
|
|
6041
|
+
if self._has_async_db():
|
|
6042
|
+
await self._aupsert_session(session=session)
|
|
6043
|
+
else:
|
|
6044
|
+
self._upsert_session(session=session)
|
|
5792
6045
|
log_debug(f"Created or updated AgentSession record: {session.session_id}")
|
|
5793
6046
|
|
|
5794
6047
|
def get_chat_history(self, session_id: Optional[str] = None) -> List[Message]:
|
|
@@ -5918,6 +6171,61 @@ class Agent:
|
|
|
5918
6171
|
raise Exception("Session not found")
|
|
5919
6172
|
return session.session_data.get("session_state", {}) if session.session_data is not None else {}
|
|
5920
6173
|
|
|
6174
|
+
def update_session_state(self, session_state_updates: Dict[str, Any], session_id: Optional[str] = None) -> str:
|
|
6175
|
+
"""
|
|
6176
|
+
Update the session state for the given session ID and user ID.
|
|
6177
|
+
Args:
|
|
6178
|
+
session_state_updates: The updates to apply to the session state. Should be a dictionary of key-value pairs.
|
|
6179
|
+
session_id: The session ID to update. If not provided, the current cached session ID is used.
|
|
6180
|
+
Returns:
|
|
6181
|
+
dict: The updated session state.
|
|
6182
|
+
"""
|
|
6183
|
+
session_id = session_id or self.session_id
|
|
6184
|
+
if session_id is None:
|
|
6185
|
+
raise Exception("Session ID is not set")
|
|
6186
|
+
session = self.get_session(session_id=session_id) # type: ignore
|
|
6187
|
+
if session is None:
|
|
6188
|
+
raise Exception("Session not found")
|
|
6189
|
+
|
|
6190
|
+
if session.session_data is not None and "session_state" not in session.session_data:
|
|
6191
|
+
session.session_data["session_state"] = {}
|
|
6192
|
+
|
|
6193
|
+
# Overwrite the loaded DB session state with the new session state
|
|
6194
|
+
for key, value in session_state_updates.items():
|
|
6195
|
+
session.session_data["session_state"][key] = value # type: ignore
|
|
6196
|
+
|
|
6197
|
+
self.save_session(session=session)
|
|
6198
|
+
|
|
6199
|
+
return session.session_data["session_state"] # type: ignore
|
|
6200
|
+
|
|
6201
|
+
async def aupdate_session_state(
|
|
6202
|
+
self, session_state_updates: Dict[str, Any], session_id: Optional[str] = None
|
|
6203
|
+
) -> str:
|
|
6204
|
+
"""
|
|
6205
|
+
Update the session state for the given session ID and user ID.
|
|
6206
|
+
Args:
|
|
6207
|
+
session_state_updates: The updates to apply to the session state. Should be a dictionary of key-value pairs.
|
|
6208
|
+
session_id: The session ID to update. If not provided, the current cached session ID is used.
|
|
6209
|
+
Returns:
|
|
6210
|
+
dict: The updated session state.
|
|
6211
|
+
"""
|
|
6212
|
+
session_id = session_id or self.session_id
|
|
6213
|
+
if session_id is None:
|
|
6214
|
+
raise Exception("Session ID is not set")
|
|
6215
|
+
session = await self.aget_session(session_id=session_id) # type: ignore
|
|
6216
|
+
if session is None:
|
|
6217
|
+
raise Exception("Session not found")
|
|
6218
|
+
|
|
6219
|
+
if session.session_data is not None and "session_state" not in session.session_data:
|
|
6220
|
+
session.session_data["session_state"] = {}
|
|
6221
|
+
|
|
6222
|
+
for key, value in session_state_updates.items():
|
|
6223
|
+
session.session_data["session_state"][key] = value # type: ignore
|
|
6224
|
+
|
|
6225
|
+
await self.asave_session(session=session)
|
|
6226
|
+
|
|
6227
|
+
return session.session_data["session_state"] # type: ignore
|
|
6228
|
+
|
|
5921
6229
|
def get_session_metrics(self, session_id: Optional[str] = None) -> Optional[Metrics]:
|
|
5922
6230
|
"""Get the session metrics for the given session ID and user ID."""
|
|
5923
6231
|
session_id = session_id or self.session_id
|
|
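Note: the new public `update_session_state` / `aupdate_session_state` methods load the target session, merge the given key/value pairs into its stored `session_state`, persist the session, and return the merged state (the methods are annotated to return `str`, although the docstring and the returned value describe the updated state dict). The merge logic, sketched against a toy in-memory store rather than the agent's real db-backed session handling:

```python
from typing import Any, Dict

class SessionStore:
    """Toy stand-in for the agent's db-backed session handling."""
    def __init__(self) -> None:
        self._sessions: Dict[str, Dict[str, Any]] = {}

    def get(self, session_id: str) -> Dict[str, Any]:
        return self._sessions.setdefault(session_id, {"session_data": {"session_state": {}}})

    def save(self, session_id: str, session: Dict[str, Any]) -> None:
        self._sessions[session_id] = session

def update_session_state(store: SessionStore, session_id: str, updates: Dict[str, Any]) -> Dict[str, Any]:
    """Merge key/value updates into the stored session_state and persist, as the new method does."""
    session = store.get(session_id)
    state = session["session_data"].setdefault("session_state", {})
    for key, value in updates.items():
        state[key] = value
    store.save(session_id, session)
    return state

if __name__ == "__main__":
    store = SessionStore()
    print(update_session_state(store, "session-1", {"shopping_list": ["milk"]}))
    print(update_session_state(store, "session-1", {"theme": "dark"}))
```

On a real Agent the call is simply `agent.update_session_state({...}, session_id=...)`, with `aupdate_session_state` as the awaitable counterpart.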
@@ -6377,7 +6685,7 @@ class Agent:
|
|
|
6377
6685
|
system_message_content += f"{get_response_model_format_prompt(self.output_schema)}"
|
|
6378
6686
|
|
|
6379
6687
|
# 3.3.15 Add the session state to the system message
|
|
6380
|
-
if
|
|
6688
|
+
if add_session_state_to_context and session_state is not None:
|
|
6381
6689
|
system_message_content += f"\n<session_state>\n{session_state}\n</session_state>\n\n"
|
|
6382
6690
|
|
|
6383
6691
|
# Return the system message
|
|
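Note: the system-message hunk above makes the session-state injection depend on an explicit `add_session_state_to_context` argument plus a non-None state. The shape of that injection, as a small self-contained sketch:

```python
from typing import Any, Dict, Optional

def add_state_to_system_message(
    system_message: str,
    session_state: Optional[Dict[str, Any]],
    add_session_state_to_context: bool,
) -> str:
    # Mirrors the hunk: only inject the block when the flag is set and state exists.
    if add_session_state_to_context and session_state is not None:
        system_message += f"\n<session_state>\n{session_state}\n</session_state>\n\n"
    return system_message

if __name__ == "__main__":
    print(add_state_to_system_message("You are a helpful agent.", {"user_name": "Ava"}, True))
```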
@@ -7750,28 +8058,40 @@ class Agent:
|
|
|
7750
8058
|
|
|
7751
8059
|
def _handle_reasoning(self, run_response: RunOutput, run_messages: RunMessages) -> None:
|
|
7752
8060
|
if self.reasoning or self.reasoning_model is not None:
|
|
7753
|
-
reasoning_generator = self._reason(
|
|
8061
|
+
reasoning_generator = self._reason(
|
|
8062
|
+
run_response=run_response, run_messages=run_messages, stream_events=False
|
|
8063
|
+
)
|
|
7754
8064
|
|
|
7755
8065
|
# Consume the generator without yielding
|
|
7756
8066
|
deque(reasoning_generator, maxlen=0)
|
|
7757
8067
|
|
|
7758
|
-
def _handle_reasoning_stream(
|
|
8068
|
+
def _handle_reasoning_stream(
|
|
8069
|
+
self, run_response: RunOutput, run_messages: RunMessages, stream_events: Optional[bool] = None
|
|
8070
|
+
) -> Iterator[RunOutputEvent]:
|
|
7759
8071
|
if self.reasoning or self.reasoning_model is not None:
|
|
7760
|
-
reasoning_generator = self._reason(
|
|
8072
|
+
reasoning_generator = self._reason(
|
|
8073
|
+
run_response=run_response,
|
|
8074
|
+
run_messages=run_messages,
|
|
8075
|
+
stream_events=stream_events,
|
|
8076
|
+
)
|
|
7761
8077
|
yield from reasoning_generator
|
|
7762
8078
|
|
|
7763
8079
|
async def _ahandle_reasoning(self, run_response: RunOutput, run_messages: RunMessages) -> None:
|
|
7764
8080
|
if self.reasoning or self.reasoning_model is not None:
|
|
7765
|
-
reason_generator = self._areason(run_response=run_response, run_messages=run_messages)
|
|
8081
|
+
reason_generator = self._areason(run_response=run_response, run_messages=run_messages, stream_events=False)
|
|
7766
8082
|
# Consume the generator without yielding
|
|
7767
8083
|
async for _ in reason_generator:
|
|
7768
8084
|
pass
|
|
7769
8085
|
|
|
7770
8086
|
async def _ahandle_reasoning_stream(
|
|
7771
|
-
self, run_response: RunOutput, run_messages: RunMessages
|
|
8087
|
+
self, run_response: RunOutput, run_messages: RunMessages, stream_events: Optional[bool] = None
|
|
7772
8088
|
) -> AsyncIterator[RunOutputEvent]:
|
|
7773
8089
|
if self.reasoning or self.reasoning_model is not None:
|
|
7774
|
-
reason_generator = self._areason(
|
|
8090
|
+
reason_generator = self._areason(
|
|
8091
|
+
run_response=run_response,
|
|
8092
|
+
run_messages=run_messages,
|
|
8093
|
+
stream_events=stream_events,
|
|
8094
|
+
)
|
|
7775
8095
|
async for item in reason_generator:
|
|
7776
8096
|
yield item
|
|
7777
8097
|
|
|
@@ -7798,12 +8118,16 @@ class Agent:
|
|
|
7798
8118
|
|
|
7799
8119
|
return updated_reasoning_content
|
 
-    def _reason(
+    def _reason(
+        self, run_response: RunOutput, run_messages: RunMessages, stream_events: Optional[bool] = None
+    ) -> Iterator[RunOutputEvent]:
         # Yield a reasoning started event
-        if
-            yield
+        if stream_events:
+            yield handle_event( # type: ignore
                 create_reasoning_started_event(from_run_response=run_response),
                 run_response,
+                events_to_skip=self.events_to_skip, # type: ignore
+                store_events=self.store_events,
             )
 
         use_default_reasoning = False
@@ -7935,14 +8259,16 @@ class Agent:
                     reasoning_steps=[ReasoningStep(result=reasoning_message.content)],
                     reasoning_agent_messages=[reasoning_message],
                 )
-                if
-                    yield
+                if stream_events:
+                    yield handle_event( # type: ignore
                         create_reasoning_completed_event(
                             from_run_response=run_response,
                             content=ReasoningSteps(reasoning_steps=[ReasoningStep(result=reasoning_message.content)]),
                             content_type=ReasoningSteps.__name__,
                         ),
                         run_response,
+                        events_to_skip=self.events_to_skip, # type: ignore
+                        store_events=self.store_events,
                     )
             else:
                 log_warning(
@@ -8011,7 +8337,7 @@ class Agent:
                     )
                     break
 
-            if (
+            if reasoning_agent_response.content is not None and (
                 reasoning_agent_response.content.reasoning_steps is None
                 or len(reasoning_agent_response.content.reasoning_steps) == 0
             ):
@@ -8021,20 +8347,22 @@ class Agent:
            reasoning_steps: List[ReasoningStep] = reasoning_agent_response.content.reasoning_steps
            all_reasoning_steps.extend(reasoning_steps)
            # Yield reasoning steps
-           if
+           if stream_events:
                for reasoning_step in reasoning_steps:
                    updated_reasoning_content = self._format_reasoning_step_content(
                        run_response=run_response,
                        reasoning_step=reasoning_step,
                    )
 
-                   yield
+                   yield handle_event( # type: ignore
                        create_reasoning_step_event(
                            from_run_response=run_response,
                            reasoning_step=reasoning_step,
                            reasoning_content=updated_reasoning_content,
                        ),
                        run_response,
+                       events_to_skip=self.events_to_skip, # type: ignore
+                       store_events=self.store_events,
                    )
 
            # Find the index of the first assistant message
@@ -8071,22 +8399,28 @@ class Agent:
         )
 
         # Yield the final reasoning completed event
-        if
-            yield
+        if stream_events:
+            yield handle_event( # type: ignore
                 create_reasoning_completed_event(
                     from_run_response=run_response,
                     content=ReasoningSteps(reasoning_steps=all_reasoning_steps),
                     content_type=ReasoningSteps.__name__,
                 ),
                 run_response,
+                events_to_skip=self.events_to_skip, # type: ignore
+                store_events=self.store_events,
            )
 
-    async def _areason(
+    async def _areason(
+        self, run_response: RunOutput, run_messages: RunMessages, stream_events: Optional[bool] = None
+    ) -> Any:
         # Yield a reasoning started event
-        if
-            yield
+        if stream_events:
+            yield handle_event( # type: ignore
                 create_reasoning_started_event(from_run_response=run_response),
                 run_response,
+                events_to_skip=self.events_to_skip, # type: ignore
+                store_events=self.store_events,
             )
 
         use_default_reasoning = False
@@ -8218,14 +8552,16 @@ class Agent:
                     reasoning_steps=[ReasoningStep(result=reasoning_message.content)],
                     reasoning_agent_messages=[reasoning_message],
                 )
-                if
-                    yield
+                if stream_events:
+                    yield handle_event(
                         create_reasoning_completed_event(
                             from_run_response=run_response,
                             content=ReasoningSteps(reasoning_steps=[ReasoningStep(result=reasoning_message.content)]),
                             content_type=ReasoningSteps.__name__,
                         ),
                         run_response,
+                        events_to_skip=self.events_to_skip, # type: ignore
+                        store_events=self.store_events,
                     )
             else:
                 log_warning(
@@ -8304,7 +8640,7 @@ class Agent:
            reasoning_steps: List[ReasoningStep] = reasoning_agent_response.content.reasoning_steps
            all_reasoning_steps.extend(reasoning_steps)
            # Yield reasoning steps
-           if
+           if stream_events:
                for reasoning_step in reasoning_steps:
                    updated_reasoning_content = self._format_reasoning_step_content(
                        run_response=run_response,
@@ -8312,13 +8648,15 @@ class Agent:
                    )
 
                    # Yield the response with the updated reasoning_content
-                   yield
+                   yield handle_event(
                        create_reasoning_step_event(
                            from_run_response=run_response,
                            reasoning_step=reasoning_step,
                            reasoning_content=updated_reasoning_content,
                        ),
                        run_response,
+                       events_to_skip=self.events_to_skip, # type: ignore
+                       store_events=self.store_events,
                    )
 
            # Find the index of the first assistant message
@@ -8354,14 +8692,16 @@ class Agent:
         )
 
         # Yield the final reasoning completed event
-        if
-            yield
+        if stream_events:
+            yield handle_event(
                 create_reasoning_completed_event(
                     from_run_response=run_response,
                     content=ReasoningSteps(reasoning_steps=all_reasoning_steps),
                     content_type=ReasoningSteps.__name__,
                 ),
                 run_response,
+                events_to_skip=self.events_to_skip, # type: ignore
+                store_events=self.store_events,
            )
 
     def _process_parser_response(
@@ -8430,18 +8770,17 @@ class Agent:
            log_warning("A response model is required to parse the response with a parser model")
 
     def _parse_response_with_parser_model_stream(
-        self,
-        session: AgentSession,
-        run_response: RunOutput,
-        stream_intermediate_steps: bool = True,
+        self, session: AgentSession, run_response: RunOutput, stream_events: bool = True
     ):
         """Parse the model response using the parser model"""
         if self.parser_model is not None:
             if self.output_schema is not None:
-                if
-                    yield
+                if stream_events:
+                    yield handle_event(
                         create_parser_model_response_started_event(run_response),
                         run_response,
+                        events_to_skip=self.events_to_skip, # type: ignore
+                        store_events=self.store_events,
                     )
 
                 parser_model_response = ModelResponse(content="")
@@ -8460,7 +8799,7 @@ class Agent:
                    model_response=parser_model_response,
                    model_response_event=model_response_event,
                    parse_structured_output=True,
-
+                   stream_events=stream_events,
                )
 
                parser_model_response_message: Optional[Message] = None
@@ -8474,28 +8813,29 @@ class Agent:
                else:
                    log_warning("Unable to parse response with parser model")
 
-                if
-                    yield
+                if stream_events:
+                    yield handle_event(
                         create_parser_model_response_completed_event(run_response),
                         run_response,
+                        events_to_skip=self.events_to_skip, # type: ignore
+                        store_events=self.store_events,
                     )
 
            else:
                log_warning("A response model is required to parse the response with a parser model")
 
     async def _aparse_response_with_parser_model_stream(
-        self,
-        session: AgentSession,
-        run_response: RunOutput,
-        stream_intermediate_steps: bool = True,
+        self, session: AgentSession, run_response: RunOutput, stream_events: bool = True
     ):
         """Parse the model response using the parser model stream."""
         if self.parser_model is not None:
             if self.output_schema is not None:
-                if
-                    yield
+                if stream_events:
+                    yield handle_event(
                         create_parser_model_response_started_event(run_response),
                         run_response,
+                        events_to_skip=self.events_to_skip, # type: ignore
+                        store_events=self.store_events,
                     )
 
                 parser_model_response = ModelResponse(content="")
@@ -8515,7 +8855,7 @@ class Agent:
                    model_response=parser_model_response,
                    model_response_event=model_response_event,
                    parse_structured_output=True,
-
+                   stream_events=stream_events,
                ):
                    yield event
 
@@ -8530,10 +8870,12 @@ class Agent:
                else:
                    log_warning("Unable to parse response with parser model")
 
-                if
-                    yield
+                if stream_events:
+                    yield handle_event(
                         create_parser_model_response_completed_event(run_response),
                         run_response,
+                        events_to_skip=self.events_to_skip, # type: ignore
+                        store_events=self.store_events,
                     )
            else:
                log_warning("A response model is required to parse the response with a parser model")
@@ -8552,7 +8894,7 @@ class Agent:
         session: AgentSession,
         run_response: RunOutput,
         run_messages: RunMessages,
-
+        stream_events: bool = False,
     ):
         """Parse the model response using the output model."""
         from agno.utils.events import (
@@ -8563,8 +8905,13 @@ class Agent:
         if self.output_model is None:
             return
 
-        if
-            yield
+        if stream_events:
+            yield handle_event(
+                create_output_model_response_started_event(run_response),
+                run_response,
+                events_to_skip=self.events_to_skip, # type: ignore
+                store_events=self.store_events,
+            )
 
         messages_for_output_model = self._get_messages_for_output_model(run_messages.messages)
 
@@ -8576,11 +8923,16 @@ class Agent:
            run_response=run_response,
            model_response=model_response,
            model_response_event=model_response_event,
-
+           stream_events=stream_events,
        )
 
-        if
-            yield
+        if stream_events:
+            yield handle_event(
+                create_output_model_response_completed_event(run_response),
+                run_response,
+                events_to_skip=self.events_to_skip, # type: ignore
+                store_events=self.store_events,
+            )
 
         # Build a list of messages that should be added to the RunResponse
         messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
@@ -8603,7 +8955,7 @@ class Agent:
         session: AgentSession,
         run_response: RunOutput,
         run_messages: RunMessages,
-
+        stream_events: bool = False,
     ):
         """Parse the model response using the output model."""
         from agno.utils.events import (
@@ -8614,8 +8966,13 @@ class Agent:
         if self.output_model is None:
             return
 
-        if
-            yield
+        if stream_events:
+            yield handle_event(
+                create_output_model_response_started_event(run_response),
+                run_response,
+                events_to_skip=self.events_to_skip, # type: ignore
+                store_events=self.store_events,
+            )
 
         messages_for_output_model = self._get_messages_for_output_model(run_messages.messages)
 
@@ -8629,12 +8986,17 @@ class Agent:
            run_response=run_response,
            model_response=model_response,
            model_response_event=model_response_event,
-
+           stream_events=stream_events,
        ):
            yield event
 
-        if
-            yield
+        if stream_events:
+            yield handle_event(
+                create_output_model_response_completed_event(run_response),
+                run_response,
+                events_to_skip=self.events_to_skip, # type: ignore
+                store_events=self.store_events,
+            )
 
         # Build a list of messages that should be added to the RunResponse
         messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
@@ -8643,15 +9005,6 @@ class Agent:
         # Update the RunResponse metrics
         run_response.metrics = self._calculate_run_metrics(messages_for_run_response)
 
-    def _handle_event(self, event: RunOutputEvent, run_response: RunOutput):
-        # We only store events that are not run_response_content events
-        events_to_skip = [event.value for event in self.events_to_skip] if self.events_to_skip else []
-        if self.store_events and event.event not in events_to_skip:
-            if run_response.events is None:
-                run_response.events = []
-            run_response.events.append(event)
-        return event
-
     ###########################################################################
     # Default Tools
     ###########################################################################
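Editor's note: the removed Agent._handle_event above is superseded by the shared handle_event helper that the reasoning, parser-model and output-model paths now call with explicit events_to_skip and store_events arguments (see the hunks above). A minimal sketch of that helper, reconstructed from the removed method, is shown below; the hosting module and exact signature are assumptions based on this diff, not the published API.

    from typing import List, Optional

    def handle_event(event, run_response, events_to_skip: Optional[List] = None, store_events: bool = False):
        # Record the event on the RunOutput when storage is enabled, then return it so the caller can yield it
        skipped = [e.value for e in events_to_skip] if events_to_skip else []
        if store_events and event.event not in skipped:
            if run_response.events is None:
                run_response.events = []
            run_response.events.append(event)
        return event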
@@ -8779,7 +9132,7 @@ class Agent:
 
         return get_tool_call_history
 
-    def
+    def _update_session_state_tool(self, session_state, session_state_updates: dict) -> str:
         """
         Update the shared session state. Provide any updates as a dictionary of key-value pairs.
         Example:
@@ -9125,6 +9478,7 @@ class Agent:
         videos: Optional[Sequence[Video]] = None,
         files: Optional[Sequence[File]] = None,
         stream: Optional[bool] = None,
+        stream_events: Optional[bool] = None,
         stream_intermediate_steps: Optional[bool] = None,
         markdown: Optional[bool] = None,
         knowledge_filters: Optional[Dict[str, Any]] = None,
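Editor's note: the new stream_events parameter added to the print_response signature above (and to the async variant further down) replaces the deprecated stream_intermediate_steps flag. An illustrative call, assuming an already configured Agent instance named agent:

    # stream_events only has an effect when stream=True; it replaces the deprecated
    # stream_intermediate_steps=True and surfaces intermediate events as they happen.
    agent.print_response(
        "Summarize the changes in this release",
        stream=True,
        stream_events=True,
    )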
@@ -9156,11 +9510,19 @@ class Agent:
         if self.output_schema is not None:
             markdown = False
 
+        # Use stream override value when necessary
         if stream is None:
-            stream = self.stream
+            stream = False if self.stream is None else self.stream
+
+        # Considering both stream_events and stream_intermediate_steps (deprecated)
+        stream_events = stream_events or stream_intermediate_steps
+
+        # Can't stream events if streaming is disabled
+        if stream is False:
+            stream_events = False
 
-        if
-
+        if stream_events is None:
+            stream_events = False if self.stream_events is None else self.stream_events
 
         if stream:
             print_response_stream(
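Editor's note: the block above fixes the precedence between the per-call flags, the deprecated stream_intermediate_steps argument, and the agent-level defaults. A standalone sketch of the same resolution order follows; the helper name and tuple return are illustrative only and not part of the library:

    from typing import Optional, Tuple

    def resolve_stream_flags(
        stream: Optional[bool],
        stream_events: Optional[bool],
        stream_intermediate_steps: Optional[bool],
        agent_stream: Optional[bool],
        agent_stream_events: Optional[bool],
    ) -> Tuple[bool, bool]:
        # 1. Per-call stream wins; otherwise fall back to the agent default, else False
        if stream is None:
            stream = False if agent_stream is None else agent_stream
        # 2. The deprecated flag still counts as a request for events
        stream_events = stream_events or stream_intermediate_steps
        # 3. Events can only be streamed when streaming itself is enabled
        if stream is False:
            stream_events = False
        # 4. Otherwise fall back to the agent-level stream_events default
        if stream_events is None:
            stream_events = False if agent_stream_events is None else agent_stream_events
        return stream, stream_events

    # e.g. resolve_stream_flags(None, None, True, True, None) -> (True, True)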
@@ -9173,7 +9535,7 @@ class Agent:
                images=images,
                videos=videos,
                files=files,
-
+               stream_events=stream_events,
                knowledge_filters=knowledge_filters,
                debug_mode=debug_mode,
                markdown=markdown,
@@ -9201,7 +9563,7 @@ class Agent:
                images=images,
                videos=videos,
                files=files,
-
+               stream_events=stream_events,
                knowledge_filters=knowledge_filters,
                debug_mode=debug_mode,
                markdown=markdown,
@@ -9230,6 +9592,7 @@ class Agent:
         videos: Optional[Sequence[Video]] = None,
         files: Optional[Sequence[File]] = None,
         stream: Optional[bool] = None,
+        stream_events: Optional[bool] = None,
         stream_intermediate_steps: Optional[bool] = None,
         markdown: Optional[bool] = None,
         knowledge_filters: Optional[Dict[str, Any]] = None,
@@ -9259,8 +9622,15 @@ class Agent:
         if stream is None:
             stream = self.stream or False
 
-
-
+        # Considering both stream_events and stream_intermediate_steps (deprecated)
+        stream_events = stream_events or stream_intermediate_steps
+
+        # Can't stream events if streaming is disabled
+        if stream is False:
+            stream_events = False
+
+        if stream_events is None:
+            stream_events = False if self.stream_events is None else self.stream_events
 
         if stream:
             await aprint_response_stream(
@@ -9273,7 +9643,7 @@ class Agent:
                images=images,
                videos=videos,
                files=files,
-
+               stream_events=stream_events,
                knowledge_filters=knowledge_filters,
                debug_mode=debug_mode,
                markdown=markdown,
@@ -9300,7 +9670,6 @@ class Agent:
                images=images,
                videos=videos,
                files=files,
-               stream_intermediate_steps=stream_intermediate_steps,
                knowledge_filters=knowledge_filters,
                debug_mode=debug_mode,
                markdown=markdown,
@@ -9436,108 +9805,70 @@ class Agent:
 
         return effective_filters
 
-    def
-
-
-        This includes media in input, output artifacts, and all messages.
-        """
-        # 1. Scrub RunInput media
-        if run_response.input is not None:
-            run_response.input.images = []
-            run_response.input.videos = []
-            run_response.input.audios = []
-            run_response.input.files = []
-
-        # 3. Scrub media from all messages
-        if run_response.messages:
-            for message in run_response.messages:
-                self._scrub_media_from_message(message)
-
-        # 4. Scrub media from additional_input messages if any
-        if run_response.additional_input:
-            for message in run_response.additional_input:
-                self._scrub_media_from_message(message)
-
-        # 5. Scrub media from reasoning_messages if any
-        if run_response.reasoning_messages:
-            for message in run_response.reasoning_messages:
-                self._scrub_media_from_message(message)
-
-    def _scrub_media_from_message(self, message: Message) -> None:
-        """Remove all media from a Message object."""
-        # Input media
-        message.images = None
-        message.videos = None
-        message.audio = None
-        message.files = None
-
-        # Output media
-        message.audio_output = None
-        message.image_output = None
-        message.video_output = None
-
-    def _scrub_tool_results_from_run_output(self, run_response: RunOutput) -> None:
-        """
-        Remove all tool-related data from RunOutput when store_tool_messages=False.
-        This removes both the tool call and its corresponding result to maintain API consistency.
-        """
-        if not run_response.messages:
-            return
+    def _cleanup_and_store(self, run_response: RunOutput, session: AgentSession, user_id: Optional[str] = None) -> None:
+        # Scrub the stored run based on storage flags
+        self._scrub_run_output_for_storage(run_response)
 
-        #
-
-
-            if message.role == "tool" and message.tool_call_id:
-                tool_call_ids_to_remove.add(message.tool_call_id)
-
-        # Step 2: Remove tool result messages (role="tool")
-        run_response.messages = [msg for msg in run_response.messages if msg.role != "tool"]
-
-        # Step 3: Remove the assistant messages related to the scrubbed tool calls
-        filtered_messages = []
-        for message in run_response.messages:
-            # Check if this assistant message made any of the tool calls we're removing
-            should_remove = False
-            if message.role == "assistant" and message.tool_calls:
-                for tool_call in message.tool_calls:
-                    if tool_call.get("id") in tool_call_ids_to_remove:
-                        should_remove = True
-                        break
+        # Stop the timer for the Run duration
+        if run_response.metrics:
+            run_response.metrics.stop_timer()
 
-
-
+        # Optional: Save output to file if save_response_to_file is set
+        self.save_run_response_to_file(
+            run_response=run_response,
+            input=run_response.input.input_content_string() if run_response.input else "",
+            session_id=session.session_id,
+            user_id=user_id,
+        )
 
-
+        # Add RunOutput to Agent Session
+        session.upsert_run(run=run_response)
 
-
-
-
-
-
-
-
-
+        # Calculate session metrics
+        self._update_session_metrics(session=session, run_response=run_response)
+
+        # Save session to memory
+        self.save_session(session=session)
+
+    async def _acleanup_and_store(
+        self, run_response: RunOutput, session: AgentSession, user_id: Optional[str] = None
+    ) -> None:
+        # Scrub the stored run based on storage flags
+        self._scrub_run_output_for_storage(run_response)
+
+        # Stop the timer for the Run duration
+        if run_response.metrics:
+            run_response.metrics.stop_timer()
+
+        # Optional: Save output to file if save_response_to_file is set
+        self.save_run_response_to_file(
+            run_response=run_response,
+            input=run_response.input.input_content_string() if run_response.input else "",
+            session_id=session.session_id,
+            user_id=user_id,
+        )
 
-
+        # Add RunOutput to Agent Session
+        session.upsert_run(run=run_response)
+
+        # Calculate session metrics
+        self._update_session_metrics(session=session, run_response=run_response)
+
+        # Save session to storage
+        await self.asave_session(session=session)
+
+    def _scrub_run_output_for_storage(self, run_response: RunOutput) -> None:
         """
         Scrub run output based on storage flags before persisting to database.
-        Returns True if any scrubbing was done, False otherwise.
         """
-        scrubbed = False
-
         if not self.store_media:
-
-            scrubbed = True
+            scrub_media_from_run_output(run_response)
 
         if not self.store_tool_messages:
-
-            scrubbed = True
+            scrub_tool_results_from_run_output(run_response)
 
         if not self.store_history_messages:
-
-            scrubbed = True
-
-        return scrubbed
+            scrub_history_messages_from_run_output(run_response)
 
     def _validate_media_object_id(
         self,
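Editor's note: the inline scrubbing methods removed in the hunk above now live in agno.utils.agent and are imported at the top of the file as scrub_media_from_run_output, scrub_tool_results_from_run_output and scrub_history_messages_from_run_output. Minimal sketches of the first two, reconstructed from the removed method bodies, follow; the history scrubber is not shown here because its old implementation was elided from the diff, and argument types and the helpers' internals are assumptions.

    def scrub_media_from_run_output(run_response) -> None:
        # Drop input media, then strip media from every stored message
        if run_response.input is not None:
            run_response.input.images = []
            run_response.input.videos = []
            run_response.input.audios = []
            run_response.input.files = []
        for messages in (run_response.messages, run_response.additional_input, run_response.reasoning_messages):
            for message in messages or []:
                message.images = message.videos = message.audio = message.files = None
                message.audio_output = message.image_output = message.video_output = None

    def scrub_tool_results_from_run_output(run_response) -> None:
        # Remove tool result messages and the assistant messages that issued those tool calls
        if not run_response.messages:
            return
        tool_call_ids = {m.tool_call_id for m in run_response.messages if m.role == "tool" and m.tool_call_id}
        kept = []
        for message in run_response.messages:
            if message.role == "tool":
                continue
            if message.role == "assistant" and message.tool_calls and any(
                tc.get("id") in tool_call_ids for tc in message.tool_calls
            ):
                continue
            kept.append(message)
        run_response.messages = kept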