agno 2.3.1__py3-none-any.whl → 2.3.3__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- agno/agent/agent.py +514 -186
- agno/compression/__init__.py +3 -0
- agno/compression/manager.py +176 -0
- agno/db/dynamo/dynamo.py +11 -0
- agno/db/firestore/firestore.py +5 -1
- agno/db/gcs_json/gcs_json_db.py +5 -2
- agno/db/in_memory/in_memory_db.py +5 -2
- agno/db/json/json_db.py +5 -1
- agno/db/migrations/manager.py +4 -4
- agno/db/mongo/async_mongo.py +158 -34
- agno/db/mongo/mongo.py +6 -2
- agno/db/mysql/mysql.py +48 -54
- agno/db/postgres/async_postgres.py +61 -51
- agno/db/postgres/postgres.py +42 -50
- agno/db/redis/redis.py +5 -0
- agno/db/redis/utils.py +5 -5
- agno/db/schemas/memory.py +7 -5
- agno/db/singlestore/singlestore.py +99 -108
- agno/db/sqlite/async_sqlite.py +32 -30
- agno/db/sqlite/sqlite.py +34 -30
- agno/knowledge/reader/pdf_reader.py +2 -2
- agno/knowledge/reader/tavily_reader.py +0 -1
- agno/memory/__init__.py +14 -1
- agno/memory/manager.py +223 -8
- agno/memory/strategies/__init__.py +15 -0
- agno/memory/strategies/base.py +67 -0
- agno/memory/strategies/summarize.py +196 -0
- agno/memory/strategies/types.py +37 -0
- agno/models/anthropic/claude.py +84 -80
- agno/models/aws/bedrock.py +38 -16
- agno/models/aws/claude.py +97 -277
- agno/models/azure/ai_foundry.py +8 -4
- agno/models/base.py +101 -14
- agno/models/cerebras/cerebras.py +18 -7
- agno/models/cerebras/cerebras_openai.py +4 -2
- agno/models/cohere/chat.py +8 -4
- agno/models/google/gemini.py +578 -20
- agno/models/groq/groq.py +18 -5
- agno/models/huggingface/huggingface.py +17 -6
- agno/models/ibm/watsonx.py +16 -6
- agno/models/litellm/chat.py +17 -7
- agno/models/message.py +19 -5
- agno/models/meta/llama.py +20 -4
- agno/models/mistral/mistral.py +8 -4
- agno/models/ollama/chat.py +17 -6
- agno/models/openai/chat.py +17 -6
- agno/models/openai/responses.py +23 -9
- agno/models/vertexai/claude.py +99 -5
- agno/os/interfaces/agui/router.py +1 -0
- agno/os/interfaces/agui/utils.py +97 -57
- agno/os/router.py +16 -1
- agno/os/routers/memory/memory.py +146 -0
- agno/os/routers/memory/schemas.py +26 -0
- agno/os/schema.py +21 -6
- agno/os/utils.py +134 -10
- agno/run/base.py +2 -1
- agno/run/workflow.py +1 -1
- agno/team/team.py +571 -225
- agno/tools/mcp/mcp.py +1 -1
- agno/utils/agent.py +119 -1
- agno/utils/dttm.py +33 -0
- agno/utils/models/ai_foundry.py +9 -2
- agno/utils/models/claude.py +12 -5
- agno/utils/models/cohere.py +9 -2
- agno/utils/models/llama.py +9 -2
- agno/utils/models/mistral.py +4 -2
- agno/utils/print_response/agent.py +37 -2
- agno/utils/print_response/team.py +52 -0
- agno/utils/tokens.py +41 -0
- agno/workflow/types.py +2 -2
- {agno-2.3.1.dist-info → agno-2.3.3.dist-info}/METADATA +45 -40
- {agno-2.3.1.dist-info → agno-2.3.3.dist-info}/RECORD +75 -68
- {agno-2.3.1.dist-info → agno-2.3.3.dist-info}/WHEEL +0 -0
- {agno-2.3.1.dist-info → agno-2.3.3.dist-info}/licenses/LICENSE +0 -0
- {agno-2.3.1.dist-info → agno-2.3.3.dist-info}/top_level.txt +0 -0
agno/team/team.py
CHANGED
@@ -30,6 +30,7 @@ from uuid import uuid4
 from pydantic import BaseModel
 
 from agno.agent import Agent
+from agno.compression.manager import CompressionManager
 from agno.db.base import AsyncBaseDb, BaseDb, SessionType, UserMemory
 from agno.exceptions import (
     InputCheckError,
@@ -66,6 +67,8 @@ from agno.session.summary import SessionSummary
 from agno.tools import Toolkit
 from agno.tools.function import Function
 from agno.utils.agent import (
+    aexecute_instructions,
+    aexecute_system_message,
     aget_last_run_output_util,
     aget_run_output_util,
     aget_session_metrics_util,
@@ -79,6 +82,8 @@ from agno.utils.agent import (
     collect_joint_files,
     collect_joint_images,
     collect_joint_videos,
+    execute_instructions,
+    execute_system_message,
     get_last_run_output_util,
     get_run_output_util,
     get_session_metrics_util,
@@ -353,7 +358,7 @@ class Team:
     output_model: Optional[Model] = None
     # Provide a prompt for the output model
     output_model_prompt: Optional[str] = None
-    #
+    # Intead of providing the model with the Pydantic output schema, add a JSON description of the output schema to the system message instead.
     use_json_mode: bool = False
     # If True, parse the response
     parse_response: bool = True
@@ -375,6 +380,12 @@ class Team:
     # If True, the team adds session summaries to the context
     add_session_summary_to_context: Optional[bool] = None
 
+    # --- Context Compression ---
+    # If True, compress tool call results to save context
+    compress_tool_results: bool = False
+    # Compression manager for compressing tool call results
+    compression_manager: Optional["CompressionManager"] = None
+
     # --- Team History ---
     # add_history_to_context=true adds messages from the chat history to the messages list sent to the Model.
     add_history_to_context: bool = False
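The two new attributes above are the public switch for tool-result compression. A minimal sketch of how they might be enabled; the member agent and its configuration are illustrative assumptions, not part of this diff:

```python
from agno.agent import Agent
from agno.team import Team

# Sketch: only compress_tool_results and compression_manager are confirmed
# by this diff; the member agent shown here is a placeholder.
team = Team(
    members=[Agent(name="researcher")],
    compress_tool_results=True,  # new in 2.3.3: compress tool call results to save context
)
```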
@@ -516,6 +527,8 @@ class Team:
         enable_session_summaries: bool = False,
         session_summary_manager: Optional[SessionSummaryManager] = None,
         add_session_summary_to_context: Optional[bool] = None,
+        compress_tool_results: bool = False,
+        compression_manager: Optional["CompressionManager"] = None,
         metadata: Optional[Dict[str, Any]] = None,
         reasoning: bool = False,
         reasoning_model: Optional[Union[Model, str]] = None,
@@ -645,6 +658,11 @@ class Team:
         self.enable_session_summaries = enable_session_summaries
         self.session_summary_manager = session_summary_manager
         self.add_session_summary_to_context = add_session_summary_to_context
+
+        # Context compression settings
+        self.compress_tool_results = compress_tool_results
+        self.compression_manager = compression_manager
+
         self.metadata = metadata
 
         self.reasoning = reasoning
@@ -720,10 +738,6 @@ class Team:
         self._background_executor = ThreadPoolExecutor(max_workers=3, thread_name_prefix="agno-bg")
         return self._background_executor
 
-    @property
-    def should_parse_structured_output(self) -> bool:
-        return self.output_schema is not None and self.parse_response and self.parser_model is None
-
     @property
     def cached_session(self) -> Optional[TeamSession]:
         return self._cached_session
@@ -869,6 +883,21 @@ class Team:
             self.enable_session_summaries or self.session_summary_manager is not None
         )
 
+    def _set_compression_manager(self) -> None:
+        if self.compress_tool_results and self.compression_manager is None:
+            self.compression_manager = CompressionManager(
+                model=self.model,
+            )
+        elif self.compression_manager is not None and self.compression_manager.model is None:
+            # If compression manager exists but has no model, use the team's model
+            self.compression_manager.model = self.model
+
+        if self.compression_manager is not None:
+            if self.compression_manager.model is None:
+                self.compression_manager.model = self.model
+            if self.compression_manager.compress_tool_results:
+                self.compress_tool_results = True
+
     def _initialize_session(
         self,
         session_id: Optional[str] = None,
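Per `_set_compression_manager` above, a team either builds a default `CompressionManager` from its own model or adopts one supplied by the caller, backfilling a missing model and honoring the manager's own `compress_tool_results` flag. A hedged sketch, assuming `CompressionManager` accepts these as constructor arguments (the diff only shows them as attributes):

```python
from agno.compression.manager import CompressionManager
from agno.team import Team

# Sketch: an explicitly configured manager. Per _set_compression_manager,
# a manager with model=None is backfilled with the team's model, and a
# manager with compress_tool_results=True turns the flag on for the team.
manager = CompressionManager(compress_tool_results=True)
team = Team(members=[], compression_manager=manager)
```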
@@ -946,6 +975,8 @@
         self._set_memory_manager()
         if self.enable_session_summaries or self.session_summary_manager is not None:
             self._set_session_summary_manager()
+        if self.compress_tool_results or self.compression_manager is not None:
+            self._set_compression_manager()
 
         log_debug(f"Team ID: {self.id}", center=True)
 
@@ -980,7 +1011,12 @@ class Team:
         """Connect the MCP tools to the agent."""
         if self.tools is not None:
             for tool in self.tools:
-
+                # Alternate method of using isinstance(tool, (MCPTools, MultiMCPTools)) to avoid imports
+                if (
+                    hasattr(type(tool), "__mro__")
+                    and any(c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__)
+                    and not tool.initialized  # type: ignore
+                ):
                     # Connect the MCP server
                     await tool.connect()  # type: ignore
                     self._mcp_tools_initialized_on_run.append(tool)
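The `__mro__` walk above is a duck-typing stand-in for `isinstance(tool, (MCPTools, MultiMCPTools))`, so this module never has to import the MCP tool classes. The same check, isolated as a helper for clarity (a sketch, not a function in the package):

```python
def looks_like_mcp_tools(tool: object) -> bool:
    # Walk the class hierarchy by name instead of importing
    # MCPTools / MultiMCPTools, mirroring the check in the diff.
    return hasattr(type(tool), "__mro__") and any(
        c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__
    )
```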
@@ -1000,6 +1036,7 @@ class Team:
         run_context: RunContext,
         user_id: Optional[str] = None,
         debug_mode: Optional[bool] = None,
+        stream_events: bool = False,
         **kwargs: Any,
     ) -> Iterator[TeamRunOutputEvent]:
         """Execute multiple pre-hook functions in succession."""
@@ -1021,28 +1058,30 @@ class Team:
         all_args.update(kwargs)
 
         for i, hook in enumerate(hooks):
-
-                run_response=run_response,
-                event=create_team_pre_hook_started_event(
-                    from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
-                ),
-                events_to_skip=self.events_to_skip,
-                store_events=self.store_events,
-            )
-            try:
-                # Filter arguments to only include those that the hook accepts
-                filtered_args = filter_hook_args(hook, all_args)
-
-                hook(**filtered_args)
-
+            if stream_events:
                 yield handle_event(  # type: ignore
                     run_response=run_response,
-                    event=
+                    event=create_team_pre_hook_started_event(
                         from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
                     ),
                     events_to_skip=self.events_to_skip,
                     store_events=self.store_events,
                 )
+            try:
+                # Filter arguments to only include those that the hook accepts
+                filtered_args = filter_hook_args(hook, all_args)
+
+                hook(**filtered_args)
+
+                if stream_events:
+                    yield handle_event(  # type: ignore
+                        run_response=run_response,
+                        event=create_team_pre_hook_completed_event(
+                            from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
+                        ),
+                        events_to_skip=self.events_to_skip,
+                        store_events=self.store_events,
+                    )
 
            except (InputCheckError, OutputCheckError) as e:
                raise e
@@ -1065,6 +1104,7 @@ class Team:
         run_context: RunContext,
         user_id: Optional[str] = None,
         debug_mode: Optional[bool] = None,
+        stream_events: bool = False,
         **kwargs: Any,
     ) -> AsyncIterator[TeamRunOutputEvent]:
         """Execute multiple pre-hook functions in succession (async version)."""
@@ -1086,14 +1126,15 @@ class Team:
         all_args.update(kwargs)
 
         for i, hook in enumerate(hooks):
-
-
-
-
-
-
-
-
+            if stream_events:
+                yield handle_event(  # type: ignore
+                    run_response=run_response,
+                    event=create_team_pre_hook_started_event(
+                        from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
+                    ),
+                    events_to_skip=self.events_to_skip,
+                    store_events=self.store_events,
+                )
             try:
                 # Filter arguments to only include those that the hook accepts
                 filtered_args = filter_hook_args(hook, all_args)
@@ -1106,14 +1147,15 @@ class Team:
                 # Synchronous function
                 hook(**filtered_args)
 
-
-
-
-
-
-
-
-
+                if stream_events:
+                    yield handle_event(  # type: ignore
+                        run_response=run_response,
+                        event=create_team_pre_hook_completed_event(
+                            from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
+                        ),
+                        events_to_skip=self.events_to_skip,
+                        store_events=self.store_events,
+                    )
 
            except (InputCheckError, OutputCheckError) as e:
                raise e
@@ -1135,6 +1177,7 @@ class Team:
         run_context: RunContext,
         user_id: Optional[str] = None,
         debug_mode: Optional[bool] = None,
+        stream_events: bool = False,
         **kwargs: Any,
     ) -> Iterator[TeamRunOutputEvent]:
         """Execute multiple post-hook functions in succession."""
@@ -1156,30 +1199,32 @@ class Team:
         all_args.update(kwargs)
 
         for i, hook in enumerate(hooks):
-
-                run_response=run_output,
-                event=create_team_post_hook_started_event(  # type: ignore
-                    from_run_response=run_output,
-                    post_hook_name=hook.__name__,
-                ),
-                events_to_skip=self.events_to_skip,
-                store_events=self.store_events,
-            )
-            try:
-                # Filter arguments to only include those that the hook accepts
-                filtered_args = filter_hook_args(hook, all_args)
-
-                hook(**filtered_args)
-
+            if stream_events:
                 yield handle_event(  # type: ignore
                     run_response=run_output,
-                    event=
+                    event=create_team_post_hook_started_event(  # type: ignore
                         from_run_response=run_output,
                         post_hook_name=hook.__name__,
                     ),
                     events_to_skip=self.events_to_skip,
                     store_events=self.store_events,
                 )
+            try:
+                # Filter arguments to only include those that the hook accepts
+                filtered_args = filter_hook_args(hook, all_args)
+
+                hook(**filtered_args)
+
+                if stream_events:
+                    yield handle_event(  # type: ignore
+                        run_response=run_output,
+                        event=create_team_post_hook_completed_event(  # type: ignore
+                            from_run_response=run_output,
+                            post_hook_name=hook.__name__,
+                        ),
+                        events_to_skip=self.events_to_skip,
+                        store_events=self.store_events,
+                    )
 
            except (InputCheckError, OutputCheckError) as e:
                raise e
@@ -1195,6 +1240,7 @@ class Team:
         run_context: RunContext,
         user_id: Optional[str] = None,
         debug_mode: Optional[bool] = None,
+        stream_events: bool = False,
         **kwargs: Any,
     ) -> AsyncIterator[TeamRunOutputEvent]:
         """Execute multiple post-hook functions in succession (async version)."""
@@ -1216,15 +1262,16 @@ class Team:
         all_args.update(kwargs)
 
         for i, hook in enumerate(hooks):
-
-
-
-
-
-
-
-
-
+            if stream_events:
+                yield handle_event(  # type: ignore
+                    run_response=run_output,
+                    event=create_team_post_hook_started_event(  # type: ignore
+                        from_run_response=run_output,
+                        post_hook_name=hook.__name__,
+                    ),
+                    events_to_skip=self.events_to_skip,
+                    store_events=self.store_events,
+                )
             try:
                 # Filter arguments to only include those that the hook accepts
                 filtered_args = filter_hook_args(hook, all_args)
@@ -1236,15 +1283,16 @@ class Team:
             else:
                 hook(**filtered_args)
 
-
-
-
-
-
-
-
-
-
+                if stream_events:
+                    yield handle_event(  # type: ignore
+                        run_response=run_output,
+                        event=create_team_post_hook_completed_event(  # type: ignore
+                            from_run_response=run_output,
+                            post_hook_name=hook.__name__,
+                        ),
+                        events_to_skip=self.events_to_skip,
+                        store_events=self.store_events,
+                    )
         except (InputCheckError, OutputCheckError) as e:
             raise e
         except Exception as e:
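All four hook runners (sync/async, pre/post) now take a `stream_events` flag: the started event is only yielded when it is set, and a matching completed event is emitted after each hook returns. A hedged usage sketch; `team` and the input are placeholders:

```python
# Sketch: hook lifecycle events are only emitted when stream_events=True.
for event in team.run("Plan the launch", stream=True, stream_events=True):
    # With stream_events=False you would see content events only,
    # no pre/post hook started/completed events.
    print(type(event).__name__)
```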
@@ -1374,6 +1422,7 @@ class Team:
             tool_choice=self.tool_choice,
             tool_call_limit=self.tool_call_limit,
             send_media_to_model=self.send_media_to_model,
+            compression_manager=self.compression_manager if self.compress_tool_results else None,
         )
 
         # Check for cancellation after model call
@@ -1383,11 +1432,14 @@ class Team:
         self._parse_response_with_output_model(model_response, run_messages)
 
         # If a parser model is provided, structure the response separately
-        self._parse_response_with_parser_model(model_response, run_messages)
+        self._parse_response_with_parser_model(model_response, run_messages, run_context=run_context)
 
         # 7. Update TeamRunOutput with the model response
         self._update_run_response(
-            model_response=model_response,
+            model_response=model_response,
+            run_response=run_response,
+            run_messages=run_messages,
+            run_context=run_context,
         )
 
         # 8. Store media if enabled
@@ -1395,7 +1447,7 @@ class Team:
         store_media_util(run_response, model_response)
 
         # 9. Convert response to structured format
-        self._convert_response_to_structured_format(run_response=run_response)
+        self._convert_response_to_structured_format(run_response=run_response, run_context=run_context)
 
         # 10. Execute post-hooks after output is generated but before response is returned
         if self.post_hooks is not None:
@@ -1497,6 +1549,7 @@ class Team:
             session=session,
             user_id=user_id,
             debug_mode=debug_mode,
+            stream_events=stream_events,
             **kwargs,
         )
         for pre_hook_event in pre_hook_iterator:
@@ -1587,6 +1640,7 @@ class Team:
             response_format=response_format,
             stream_events=stream_events,
             session_state=run_context.session_state,
+            run_context=run_context,
         ):
             raise_if_cancelled(run_response.run_id)  # type: ignore
             yield event
@@ -1599,6 +1653,7 @@ class Team:
             response_format=response_format,
             stream_events=stream_events,
             session_state=run_context.session_state,
+            run_context=run_context,
         ):
             raise_if_cancelled(run_response.run_id)  # type: ignore
             from agno.run.team import IntermediateRunContentEvent, RunContentEvent
@@ -1626,7 +1681,7 @@ class Team:
 
         # 7. Parse response with parser model if provided
         yield from self._parse_response_with_parser_model_stream(
-            session=session, run_response=run_response, stream_events=stream_events
+            session=session, run_response=run_response, stream_events=stream_events, run_context=run_context
         )
 
         # Yield RunContentCompletedEvent
@@ -1646,6 +1701,7 @@ class Team:
             session=session,
             user_id=user_id,
             debug_mode=debug_mode,
+            stream_events=stream_events,
             **kwargs,
         )
         raise_if_cancelled(run_response.run_id)  # type: ignore
@@ -1757,6 +1813,7 @@ class Team:
         dependencies: Optional[Dict[str, Any]] = None,
         metadata: Optional[Dict[str, Any]] = None,
         debug_mode: Optional[bool] = None,
+        output_schema: Optional[Type[BaseModel]] = None,
         **kwargs: Any,
     ) -> TeamRunOutput: ...
 
@@ -1784,8 +1841,9 @@ class Team:
         dependencies: Optional[Dict[str, Any]] = None,
         metadata: Optional[Dict[str, Any]] = None,
         debug_mode: Optional[bool] = None,
-        yield_run_response: bool =
+        yield_run_response: Optional[bool] = None,  # To be deprecated: use yield_run_output instead
         yield_run_output: bool = False,
+        output_schema: Optional[Type[BaseModel]] = None,
         **kwargs: Any,
     ) -> Iterator[Union[RunOutputEvent, TeamRunOutputEvent]]: ...
 
@@ -1812,8 +1870,9 @@ class Team:
         dependencies: Optional[Dict[str, Any]] = None,
         metadata: Optional[Dict[str, Any]] = None,
         debug_mode: Optional[bool] = None,
-        yield_run_response: bool =
+        yield_run_response: Optional[bool] = None,  # To be deprecated: use yield_run_output instead
        yield_run_output: bool = False,
+        output_schema: Optional[Type[BaseModel]] = None,
         **kwargs: Any,
     ) -> Union[TeamRunOutput, Iterator[Union[RunOutputEvent, TeamRunOutputEvent]]]:
         """Run the Team and return the response."""
@@ -1878,6 +1937,10 @@ class Team:
         # Determine runtime dependencies
         dependencies = dependencies if dependencies is not None else self.dependencies
 
+        # Resolve output_schema parameter takes precedence, then fall back to self.output_schema
+        if output_schema is None:
+            output_schema = self.output_schema
+
         # Initialize run context
         run_context = run_context or RunContext(
             run_id=run_id,
@@ -1885,7 +1948,10 @@ class Team:
             user_id=user_id,
             session_state=session_state,
             dependencies=dependencies,
+            output_schema=output_schema,
         )
+        # output_schema parameter takes priority, even if run_context was provided
+        run_context.output_schema = output_schema
 
         # Resolve callable dependencies if present
         if run_context.dependencies is not None:
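`run()` (and `arun()`, below) now take a per-call `output_schema` that is resolved onto the `RunContext` and overrides `Team.output_schema`, while `yield_run_response` is kept only for backwards compatibility in favor of `yield_run_output`. A sketch of the new parameter; the schema and prompt are illustrative:

```python
from pydantic import BaseModel

class Report(BaseModel):
    title: str
    summary: str

# Sketch: the per-run schema takes precedence over Team.output_schema.
output = team.run("Summarize the findings", output_schema=Report)
print(output.content_type)  # "Report" once structured parsing succeeds
```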
@@ -1923,10 +1989,6 @@ class Team:
         self.stream = self.stream or stream
         self.stream_events = self.stream_events or stream_events
 
-        # Configure the model for runs
-        response_format: Optional[Union[Dict, Type[BaseModel]]] = (
-            self._get_response_format() if self.parser_model is None else None
-        )
         self.model = cast(Model, self.model)
 
         if self.metadata is not None:
@@ -1938,6 +2000,11 @@ class Team:
         if metadata:
             run_context.metadata = metadata
 
+        # Configure the model for runs
+        response_format: Optional[Union[Dict, Type[BaseModel]]] = (
+            self._get_response_format(run_context=run_context) if self.parser_model is None else None
+        )
+
         # Create a new run_response for this attempt
         run_response = TeamRunOutput(
             run_id=run_id,
@@ -1964,7 +2031,7 @@ class Team:
         last_exception = None
         num_attempts = retries + 1
 
-        yield_run_output = yield_run_output or yield_run_response  # For backwards compatibility
+        yield_run_output = bool(yield_run_output or yield_run_response)  # For backwards compatibility
 
         for attempt in range(num_attempts):
             # Initialize the current run
@@ -2194,6 +2261,7 @@ class Team:
             response_format=response_format,
             send_media_to_model=self.send_media_to_model,
             run_response=run_response,
+            compression_manager=self.compression_manager if self.compress_tool_results else None,
         )  # type: ignore
 
         # Check for cancellation after model call
@@ -2203,11 +2271,16 @@ class Team:
         await self._agenerate_response_with_output_model(model_response=model_response, run_messages=run_messages)
 
         # If a parser model is provided, structure the response separately
-        await self._aparse_response_with_parser_model(
+        await self._aparse_response_with_parser_model(
+            model_response=model_response, run_messages=run_messages, run_context=run_context
+        )
 
         # 9. Update TeamRunOutput with the model response
         self._update_run_response(
-            model_response=model_response,
+            model_response=model_response,
+            run_response=run_response,
+            run_messages=run_messages,
+            run_context=run_context,
         )
 
         # 10. Store media if enabled
@@ -2215,7 +2288,7 @@ class Team:
         store_media_util(run_response, model_response)
 
         # 11. Convert response to structured format
-        self._convert_response_to_structured_format(run_response=run_response)
+        self._convert_response_to_structured_format(run_response=run_response, run_context=run_context)
 
         # 12. Execute post-hooks after output is generated but before response is returned
         if self.post_hooks is not None:
@@ -2351,6 +2424,7 @@ class Team:
             session=team_session,
             user_id=user_id,
             debug_mode=debug_mode,
+            stream_events=stream_events,
             **kwargs,
         )
         async for pre_hook_event in pre_hook_iterator:
@@ -2441,6 +2515,7 @@ class Team:
             response_format=response_format,
             stream_events=stream_events,
             session_state=run_context.session_state,
+            run_context=run_context,
         ):
             raise_if_cancelled(run_response.run_id)  # type: ignore
             yield event
@@ -2453,6 +2528,7 @@ class Team:
             response_format=response_format,
             stream_events=stream_events,
             session_state=run_context.session_state,
+            run_context=run_context,
         ):
             raise_if_cancelled(run_response.run_id)  # type: ignore
             from agno.run.team import IntermediateRunContentEvent, RunContentEvent
@@ -2480,7 +2556,7 @@ class Team:
 
         # 10. Parse response with parser model if provided
         async for event in self._aparse_response_with_parser_model_stream(
-            session=team_session, run_response=run_response, stream_events=stream_events
+            session=team_session, run_response=run_response, stream_events=stream_events, run_context=run_context
         ):
             yield event
 
@@ -2502,6 +2578,7 @@ class Team:
             session=team_session,
             user_id=user_id,
             debug_mode=debug_mode,
+            stream_events=stream_events,
             **kwargs,
         ):
             yield event
@@ -2626,6 +2703,7 @@ class Team:
         dependencies: Optional[Dict[str, Any]] = None,
         metadata: Optional[Dict[str, Any]] = None,
         debug_mode: Optional[bool] = None,
+        output_schema: Optional[Type[BaseModel]] = None,
         **kwargs: Any,
     ) -> TeamRunOutput: ...
 
@@ -2653,8 +2731,9 @@ class Team:
         dependencies: Optional[Dict[str, Any]] = None,
         metadata: Optional[Dict[str, Any]] = None,
         debug_mode: Optional[bool] = None,
-        yield_run_response: bool =
+        yield_run_response: Optional[bool] = None,  # To be deprecated: use yield_run_output instead
         yield_run_output: bool = False,
+        output_schema: Optional[Type[BaseModel]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[Union[RunOutputEvent, TeamRunOutputEvent]]: ...
 
@@ -2681,8 +2760,9 @@ class Team:
         dependencies: Optional[Dict[str, Any]] = None,
         metadata: Optional[Dict[str, Any]] = None,
         debug_mode: Optional[bool] = None,
-        yield_run_response: bool =
+        yield_run_response: Optional[bool] = None,  # To be deprecated: use yield_run_output instead
         yield_run_output: bool = False,
+        output_schema: Optional[Type[BaseModel]] = None,
         **kwargs: Any,
     ) -> Union[TeamRunOutput, AsyncIterator[Union[RunOutputEvent, TeamRunOutputEvent]]]:
         """Run the Team asynchronously and return the response."""
@@ -2766,11 +2846,6 @@ class Team:
         self.stream = self.stream or stream
         self.stream_events = self.stream_events or stream_events
 
-        # Configure the model for runs
-        response_format: Optional[Union[Dict, Type[BaseModel]]] = (
-            self._get_response_format() if self.parser_model is None else None
-        )
-
         self.model = cast(Model, self.model)
 
         if self.metadata is not None:
@@ -2784,6 +2859,10 @@ class Team:
         if self.knowledge_filters or knowledge_filters:
             effective_filters = self._get_effective_filters(knowledge_filters)
 
+        # Resolve output_schema parameter takes precedence, then fall back to self.output_schema
+        if output_schema is None:
+            output_schema = self.output_schema
+
         # Initialize run context
         run_context = run_context or RunContext(
             run_id=run_id,
@@ -2793,6 +2872,14 @@ class Team:
             dependencies=dependencies,
             knowledge_filters=effective_filters,
             metadata=metadata,
+            output_schema=output_schema,
+        )
+        # output_schema parameter takes priority, even if run_context was provided
+        run_context.output_schema = output_schema
+
+        # Configure the model for runs
+        response_format: Optional[Union[Dict, Type[BaseModel]]] = (
+            self._get_response_format(run_context=run_context) if self.parser_model is None else None
         )
 
         # Create a new run_response for this attempt
@@ -2821,7 +2908,7 @@ class Team:
         last_exception = None
         num_attempts = retries + 1
 
-        yield_run_output = yield_run_output or yield_run_response  # For backwards compatibility
+        yield_run_output = bool(yield_run_output or yield_run_response)  # For backwards compatibility
 
         for attempt in range(num_attempts):
             # Run the team
@@ -2901,14 +2988,21 @@ class Team:
         raise Exception(f"Failed after {num_attempts} attempts.")
 
     def _update_run_response(
-        self,
+        self,
+        model_response: ModelResponse,
+        run_response: TeamRunOutput,
+        run_messages: RunMessages,
+        run_context: Optional[RunContext] = None,
     ):
+        # Get output_schema from run_context
+        output_schema = run_context.output_schema if run_context else None
+
         # Handle structured outputs
-        if (
+        if (output_schema is not None) and not self.use_json_mode and (model_response.parsed is not None):
             # Update the run_response content with the structured output
             run_response.content = model_response.parsed
             # Update the run_response content_type with the structured output class name
-            run_response.content_type =
+            run_response.content_type = output_schema.__name__
         else:
             # Update the run_response content with the model response content
             if not run_response.content:
|
|
|
2971
3065
|
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
|
|
2972
3066
|
stream_events: bool = False,
|
|
2973
3067
|
session_state: Optional[Dict[str, Any]] = None,
|
|
3068
|
+
run_context: Optional[RunContext] = None,
|
|
2974
3069
|
) -> Iterator[Union[TeamRunOutputEvent, RunOutputEvent]]:
|
|
2975
3070
|
self.model = cast(Model, self.model)
|
|
2976
3071
|
|
|
@@ -2979,8 +3074,12 @@ class Team:
|
|
|
2979
3074
|
"reasoning_time_taken": 0.0,
|
|
2980
3075
|
}
|
|
2981
3076
|
|
|
3077
|
+
# Get output_schema from run_context
|
|
3078
|
+
output_schema = run_context.output_schema if run_context else None
|
|
3079
|
+
should_parse_structured_output = output_schema is not None and self.parse_response and self.parser_model is None
|
|
3080
|
+
|
|
2982
3081
|
stream_model_response = True
|
|
2983
|
-
if
|
|
3082
|
+
if should_parse_structured_output:
|
|
2984
3083
|
log_debug("Response model set, model response is not streamed.")
|
|
2985
3084
|
stream_model_response = False
|
|
2986
3085
|
|
|
@@ -2993,6 +3092,7 @@ class Team:
|
|
|
2993
3092
|
tool_call_limit=self.tool_call_limit,
|
|
2994
3093
|
stream_model_response=stream_model_response,
|
|
2995
3094
|
send_media_to_model=self.send_media_to_model,
|
|
3095
|
+
compression_manager=self.compression_manager if self.compress_tool_results else None,
|
|
2996
3096
|
):
|
|
2997
3097
|
yield from self._handle_model_response_chunk(
|
|
2998
3098
|
session=session,
|
|
@@ -3001,8 +3101,9 @@ class Team:
|
|
|
3001
3101
|
model_response_event=model_response_event,
|
|
3002
3102
|
reasoning_state=reasoning_state,
|
|
3003
3103
|
stream_events=stream_events,
|
|
3004
|
-
parse_structured_output=
|
|
3104
|
+
parse_structured_output=should_parse_structured_output,
|
|
3005
3105
|
session_state=session_state,
|
|
3106
|
+
run_context=run_context,
|
|
3006
3107
|
)
|
|
3007
3108
|
|
|
3008
3109
|
# 3. Update TeamRunOutput
|
|
@@ -3055,6 +3156,7 @@ class Team:
|
|
|
3055
3156
|
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
|
|
3056
3157
|
stream_events: bool = False,
|
|
3057
3158
|
session_state: Optional[Dict[str, Any]] = None,
|
|
3159
|
+
run_context: Optional[RunContext] = None,
|
|
3058
3160
|
) -> AsyncIterator[Union[TeamRunOutputEvent, RunOutputEvent]]:
|
|
3059
3161
|
self.model = cast(Model, self.model)
|
|
3060
3162
|
|
|
@@ -3063,8 +3165,12 @@ class Team:
|
|
|
3063
3165
|
"reasoning_time_taken": 0.0,
|
|
3064
3166
|
}
|
|
3065
3167
|
|
|
3168
|
+
# Get output_schema from run_context
|
|
3169
|
+
output_schema = run_context.output_schema if run_context else None
|
|
3170
|
+
should_parse_structured_output = output_schema is not None and self.parse_response and self.parser_model is None
|
|
3171
|
+
|
|
3066
3172
|
stream_model_response = True
|
|
3067
|
-
if
|
|
3173
|
+
if should_parse_structured_output:
|
|
3068
3174
|
log_debug("Response model set, model response is not streamed.")
|
|
3069
3175
|
stream_model_response = False
|
|
3070
3176
|
|
|
@@ -3078,6 +3184,7 @@ class Team:
|
|
|
3078
3184
|
stream_model_response=stream_model_response,
|
|
3079
3185
|
send_media_to_model=self.send_media_to_model,
|
|
3080
3186
|
run_response=run_response,
|
|
3187
|
+
compression_manager=self.compression_manager if self.compress_tool_results else None,
|
|
3081
3188
|
) # type: ignore
|
|
3082
3189
|
async for model_response_event in model_stream:
|
|
3083
3190
|
for event in self._handle_model_response_chunk(
|
|
@@ -3087,13 +3194,17 @@ class Team:
|
|
|
3087
3194
|
model_response_event=model_response_event,
|
|
3088
3195
|
reasoning_state=reasoning_state,
|
|
3089
3196
|
stream_events=stream_events,
|
|
3090
|
-
parse_structured_output=
|
|
3197
|
+
parse_structured_output=should_parse_structured_output,
|
|
3091
3198
|
session_state=session_state,
|
|
3199
|
+
run_context=run_context,
|
|
3092
3200
|
):
|
|
3093
3201
|
yield event
|
|
3094
3202
|
|
|
3203
|
+
# Get output_schema from run_context
|
|
3204
|
+
output_schema = run_context.output_schema if run_context else None
|
|
3205
|
+
|
|
3095
3206
|
# Handle structured outputs
|
|
3096
|
-
if (
|
|
3207
|
+
if (output_schema is not None) and not self.use_json_mode and (full_model_response.parsed is not None):
|
|
3097
3208
|
# Update the run_response content with the structured output
|
|
3098
3209
|
run_response.content = full_model_response.parsed
|
|
3099
3210
|
|
|
@@ -3144,6 +3255,7 @@ class Team:
|
|
|
3144
3255
|
stream_events: bool = False,
|
|
3145
3256
|
parse_structured_output: bool = False,
|
|
3146
3257
|
session_state: Optional[Dict[str, Any]] = None,
|
|
3258
|
+
run_context: Optional[RunContext] = None,
|
|
3147
3259
|
) -> Iterator[Union[TeamRunOutputEvent, RunOutputEvent]]:
|
|
3148
3260
|
if isinstance(model_response_event, tuple(get_args(RunOutputEvent))) or isinstance(
|
|
3149
3261
|
model_response_event, tuple(get_args(TeamRunOutputEvent))
|
|
@@ -3158,6 +3270,7 @@ class Team:
|
|
|
3158
3270
|
model_response_event.session_id = session.session_id # type: ignore
|
|
3159
3271
|
if not model_response_event.run_id: # type: ignore
|
|
3160
3272
|
model_response_event.run_id = run_response.run_id # type: ignore
|
|
3273
|
+
|
|
3161
3274
|
# We just bubble the event up
|
|
3162
3275
|
yield handle_event( # type: ignore
|
|
3163
3276
|
model_response_event, # type: ignore
|
|
@@ -3179,12 +3292,14 @@ class Team:
|
|
|
3179
3292
|
if model_response_event.content is not None:
|
|
3180
3293
|
if parse_structured_output:
|
|
3181
3294
|
full_model_response.content = model_response_event.content
|
|
3182
|
-
self._convert_response_to_structured_format(full_model_response)
|
|
3183
|
-
|
|
3295
|
+
self._convert_response_to_structured_format(full_model_response, run_context=run_context)
|
|
3296
|
+
# Get output_schema from run_context
|
|
3297
|
+
output_schema = run_context.output_schema if run_context else None
|
|
3298
|
+
content_type = output_schema.__name__ # type: ignore
|
|
3184
3299
|
run_response.content_type = content_type
|
|
3185
3300
|
elif self._member_response_model is not None:
|
|
3186
3301
|
full_model_response.content = model_response_event.content
|
|
3187
|
-
self._convert_response_to_structured_format(full_model_response)
|
|
3302
|
+
self._convert_response_to_structured_format(full_model_response, run_context=run_context)
|
|
3188
3303
|
content_type = self._member_response_model.__name__ # type: ignore
|
|
3189
3304
|
run_response.content_type = content_type
|
|
3190
3305
|
elif isinstance(model_response_event.content, str):
|
|
@@ -3311,15 +3426,16 @@ class Team:
|
|
|
3311
3426
|
run_response.tools.extend(tool_executions_list)
|
|
3312
3427
|
|
|
3313
3428
|
for tool in tool_executions_list:
|
|
3314
|
-
|
|
3315
|
-
|
|
3316
|
-
|
|
3317
|
-
|
|
3318
|
-
|
|
3319
|
-
|
|
3320
|
-
|
|
3321
|
-
|
|
3322
|
-
|
|
3429
|
+
if stream_events:
|
|
3430
|
+
yield handle_event( # type: ignore
|
|
3431
|
+
create_team_tool_call_started_event(
|
|
3432
|
+
from_run_response=run_response,
|
|
3433
|
+
tool=tool,
|
|
3434
|
+
),
|
|
3435
|
+
run_response,
|
|
3436
|
+
events_to_skip=self.events_to_skip,
|
|
3437
|
+
store_events=self.store_events,
|
|
3438
|
+
)
|
|
3323
3439
|
|
|
3324
3440
|
# If the model response is a tool_call_completed, update the existing tool call in the run_response
|
|
3325
3441
|
elif model_response_event.event == ModelResponseEvent.tool_call_completed.value:
|
|
@@ -3393,16 +3509,17 @@ class Team:
|
|
|
3393
3509
|
"reasoning_time_taken"
|
|
3394
3510
|
] + float(metrics.duration)
|
|
3395
3511
|
|
|
3396
|
-
|
|
3397
|
-
|
|
3398
|
-
|
|
3399
|
-
|
|
3400
|
-
|
|
3401
|
-
|
|
3402
|
-
|
|
3403
|
-
|
|
3404
|
-
|
|
3405
|
-
|
|
3512
|
+
if stream_events:
|
|
3513
|
+
yield handle_event( # type: ignore
|
|
3514
|
+
create_team_tool_call_completed_event(
|
|
3515
|
+
from_run_response=run_response,
|
|
3516
|
+
tool=tool_call,
|
|
3517
|
+
content=model_response_event.content,
|
|
3518
|
+
),
|
|
3519
|
+
run_response,
|
|
3520
|
+
events_to_skip=self.events_to_skip,
|
|
3521
|
+
store_events=self.store_events,
|
|
3522
|
+
)
|
|
3406
3523
|
|
|
3407
3524
|
if stream_events:
|
|
3408
3525
|
if reasoning_step is not None:
|
|
@@ -3428,18 +3545,23 @@ class Team:
|
|
|
3428
3545
|
store_events=self.store_events,
|
|
3429
3546
|
)
|
|
3430
3547
|
|
|
3431
|
-
def _convert_response_to_structured_format(
|
|
3548
|
+
def _convert_response_to_structured_format(
|
|
3549
|
+
self, run_response: Union[TeamRunOutput, RunOutput, ModelResponse], run_context: Optional[RunContext] = None
|
|
3550
|
+
):
|
|
3551
|
+
# Get output_schema from run_context
|
|
3552
|
+
output_schema = run_context.output_schema if run_context else None
|
|
3553
|
+
|
|
3432
3554
|
# Convert the response to the structured format if needed
|
|
3433
|
-
if
|
|
3555
|
+
if output_schema is not None and not isinstance(run_response.content, output_schema):
|
|
3434
3556
|
if isinstance(run_response.content, str) and self.parse_response:
|
|
3435
3557
|
try:
|
|
3436
|
-
parsed_response_content = parse_response_model_str(run_response.content,
|
|
3558
|
+
parsed_response_content = parse_response_model_str(run_response.content, output_schema)
|
|
3437
3559
|
|
|
3438
3560
|
# Update TeamRunOutput
|
|
3439
3561
|
if parsed_response_content is not None:
|
|
3440
3562
|
run_response.content = parsed_response_content
|
|
3441
3563
|
if hasattr(run_response, "content_type"):
|
|
3442
|
-
run_response.content_type =
|
|
3564
|
+
run_response.content_type = output_schema.__name__
|
|
3443
3565
|
else:
|
|
3444
3566
|
log_warning("Failed to convert response to output_schema")
|
|
3445
3567
|
except Exception as e:
|
|
@@ -3532,9 +3654,14 @@ class Team:
             team_id=self.id,
         )
 
-    def _get_response_format(
+    def _get_response_format(
+        self, model: Optional[Model] = None, run_context: Optional[RunContext] = None
+    ) -> Optional[Union[Dict, Type[BaseModel]]]:
         model = cast(Model, model or self.model)
-
+        # Get output_schema from run_context
+        output_schema = run_context.output_schema if run_context else None
+
+        if output_schema is None:
             return None
         else:
             json_response_format = {"type": "json_object"}
@@ -3542,7 +3669,7 @@ class Team:
             if model.supports_native_structured_outputs:
                 if not self.use_json_mode:
                     log_debug("Setting Model.response_format to Agent.output_schema")
-                    return
+                    return output_schema
                 else:
                     log_debug(
                         "Model supports native structured outputs but it is not enabled. Using JSON mode instead."
@@ -3555,8 +3682,8 @@ class Team:
                 return {
                     "type": "json_schema",
                     "json_schema": {
-                        "name":
-                        "schema":
+                        "name": output_schema.__name__,
+                        "schema": output_schema.model_json_schema(),
                     },
                 }
         else:
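Depending on model capabilities, `_get_response_format` now returns the schema class itself (native structured outputs), the `json_schema` dict built above, or a plain JSON-object format. For a Pydantic model named `Report`, the `json_schema` branch would produce roughly:

```python
response_format = {
    "type": "json_schema",
    "json_schema": {
        "name": "Report",  # output_schema.__name__
        "schema": Report.model_json_schema(),  # standard Pydantic JSON schema
    },
}
```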
@@ -3587,14 +3714,21 @@ class Team:
         else:
             log_warning("Unable to parse response with parser model")
 
-    def _parse_response_with_parser_model(
+    def _parse_response_with_parser_model(
+        self, model_response: ModelResponse, run_messages: RunMessages, run_context: Optional[RunContext] = None
+    ) -> None:
         """Parse the model response using the parser model."""
         if self.parser_model is None:
             return
 
-
-
-
+        # Get output_schema from run_context
+        output_schema = run_context.output_schema if run_context else None
+
+        if output_schema is not None:
+            parser_response_format = self._get_response_format(self.parser_model, run_context=run_context)
+            messages_for_parser_model = self._get_messages_for_parser_model(
+                model_response, parser_response_format, run_context=run_context
+            )
             parser_model_response: ModelResponse = self.parser_model.response(
                 messages=messages_for_parser_model,
                 response_format=parser_response_format,
@@ -3606,15 +3740,20 @@ class Team:
             log_warning("A response model is required to parse the response with a parser model")
 
     async def _aparse_response_with_parser_model(
-        self, model_response: ModelResponse, run_messages: RunMessages
+        self, model_response: ModelResponse, run_messages: RunMessages, run_context: Optional[RunContext] = None
     ) -> None:
         """Parse the model response using the parser model."""
         if self.parser_model is None:
             return
 
-
-
-
+        # Get output_schema from run_context
+        output_schema = run_context.output_schema if run_context else None
+
+        if output_schema is not None:
+            parser_response_format = self._get_response_format(self.parser_model, run_context=run_context)
+            messages_for_parser_model = self._get_messages_for_parser_model(
+                model_response, parser_response_format, run_context=run_context
+            )
             parser_model_response: ModelResponse = await self.parser_model.aresponse(
                 messages=messages_for_parser_model,
                 response_format=parser_response_format,
@@ -3630,10 +3769,15 @@ class Team:
         session: TeamSession,
         run_response: TeamRunOutput,
         stream_events: bool = False,
+        run_context: Optional[RunContext] = None,
     ):
         """Parse the model response using the parser model"""
         if self.parser_model is not None:
-
+            # run_context override for output_schema
+            # Get output_schema from run_context
+            output_schema = run_context.output_schema if run_context else None
+
+            if output_schema is not None:
                 if stream_events:
                     yield handle_event(  # type: ignore
                         create_team_parser_model_response_started_event(run_response),
@@ -3643,9 +3787,9 @@ class Team:
                 )
 
                 parser_model_response = ModelResponse(content="")
-                parser_response_format = self._get_response_format(self.parser_model)
+                parser_response_format = self._get_response_format(self.parser_model, run_context=run_context)
                 messages_for_parser_model = self._get_messages_for_parser_model_stream(
-                    run_response, parser_response_format
+                    run_response, parser_response_format, run_context=run_context
                 )
                 for model_response_event in self.parser_model.response_stream(
                     messages=messages_for_parser_model,
@@ -3659,6 +3803,7 @@ class Team:
                     model_response_event=model_response_event,
                     parse_structured_output=True,
                     stream_events=stream_events,
+                    run_context=run_context,
                 )
 
                 run_response.content = parser_model_response.content
@@ -3686,11 +3831,19 @@ class Team:
             log_warning("A response model is required to parse the response with a parser model")
 
     async def _aparse_response_with_parser_model_stream(
-        self,
+        self,
+        session: TeamSession,
+        run_response: TeamRunOutput,
+        stream_events: bool = False,
+        run_context: Optional[RunContext] = None,
     ):
         """Parse the model response using the parser model stream."""
         if self.parser_model is not None:
-
+            # run_context override for output_schema
+            # Get output_schema from run_context
+            output_schema = run_context.output_schema if run_context else None
+
+            if output_schema is not None:
                 if stream_events:
                     yield handle_event(  # type: ignore
                         create_team_parser_model_response_started_event(run_response),
@@ -3700,9 +3853,9 @@ class Team:
                 )
 
                 parser_model_response = ModelResponse(content="")
-                parser_response_format = self._get_response_format(self.parser_model)
+                parser_response_format = self._get_response_format(self.parser_model, run_context=run_context)
                 messages_for_parser_model = self._get_messages_for_parser_model_stream(
-                    run_response, parser_response_format
+                    run_response, parser_response_format, run_context=run_context
                 )
                 model_response_stream = self.parser_model.aresponse_stream(
                     messages=messages_for_parser_model,
@@ -3717,6 +3870,7 @@ class Team:
                     model_response_event=model_response_event,
                     parse_structured_output=True,
                     stream_events=stream_events,
+                    run_context=run_context,
                 ):
                     yield event
@@ -4211,7 +4365,10 @@ class Team:
         for tool in self.tools:
             if isawaitable(tool):
                 raise NotImplementedError("Use `acli_app` to use async tools.")
-
+            # Alternate method of using isinstance(tool, (MCPTools, MultiMCPTools)) to avoid imports
+            if hasattr(type(tool), "__mro__") and any(
+                c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__
+            ):
                 raise NotImplementedError("Use `acli_app` to use MCP tools.")
 
         if input:
@@ -4985,7 +5142,10 @@ class Team:
         # Add provided tools
         if self.tools is not None:
             for tool in self.tools:
-
+                # Alternate method of using isinstance(tool, (MCPTools, MultiMCPTools)) to avoid imports
+                if hasattr(type(tool), "__mro__") and any(
+                    c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__
+                ):
                     if tool.refresh_connection:  # type: ignore
                         try:
                             is_alive = await tool.is_alive()  # type: ignore
@@ -5027,7 +5187,10 @@ class Team:
         # Add provided tools
         if self.tools is not None:
             for tool in self.tools:
-
+                # Alternate method of using isinstance(tool, (MCPTools, MultiMCPTools)) to avoid imports
+                if hasattr(type(tool), "__mro__") and any(
+                    c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__
+                ):
                     # Only add the tool if it successfully connected and built its tools
                     if check_mcp_tools and not tool.initialized:  # type: ignore
                         continue
@@ -5129,9 +5292,12 @@ class Team:
|
|
|
5129
5292
|
_function_names = []
|
|
5130
5293
|
_functions: List[Union[Function, dict]] = []
|
|
5131
5294
|
|
|
5295
|
+
# Get output_schema from run_context
|
|
5296
|
+
output_schema = run_context.output_schema if run_context else None
|
|
5297
|
+
|
|
5132
5298
|
# Check if we need strict mode for the model
|
|
5133
5299
|
strict = False
|
|
5134
|
-
if
|
|
5300
|
+
if output_schema is not None and not self.use_json_mode and model.supports_native_structured_outputs:
|
|
5135
5301
|
strict = True
|
|
5136
5302
|
|
|
5137
5303
|
for tool in _tools:
|
|
@@ -5291,6 +5457,9 @@ class Team:
|
|
|
5291
5457
|
dependencies = run_context.dependencies or dependencies
|
|
5292
5458
|
metadata = run_context.metadata or metadata
|
|
5293
5459
|
|
|
5460
|
+
# Get output_schema from run_context
|
|
5461
|
+
output_schema = run_context.output_schema if run_context else None
|
|
5462
|
+
|
|
5294
5463
|
# 1. If the system_message is provided, use that.
|
|
5295
5464
|
if self.system_message is not None:
|
|
5296
5465
|
if isinstance(self.system_message, Message):
|
|
@@ -5300,7 +5469,13 @@ class Team:
|
|
|
5300
5469
|
if isinstance(self.system_message, str):
|
|
5301
5470
|
sys_message_content = self.system_message
|
|
5302
5471
|
elif callable(self.system_message):
|
|
5303
|
-
sys_message_content =
|
|
5472
|
+
sys_message_content = execute_system_message(
|
|
5473
|
+
system_message=self.system_message,
|
|
5474
|
+
agent=self,
|
|
5475
|
+
team=self,
|
|
5476
|
+
session_state=session_state,
|
|
5477
|
+
run_context=run_context,
|
|
5478
|
+
)
|
|
5304
5479
|
if not isinstance(sys_message_content, str):
|
|
5305
5480
|
raise Exception("system_message must return a string")
|
|
5306
5481
|
|
|
@@ -5324,15 +5499,13 @@ class Team:
|
|
|
5324
5499
|
if self.instructions is not None:
|
|
5325
5500
|
_instructions = self.instructions
|
|
5326
5501
|
if callable(self.instructions):
|
|
5327
|
-
|
|
5328
|
-
|
|
5329
|
-
|
|
5330
|
-
|
|
5331
|
-
|
|
5332
|
-
|
|
5333
|
-
|
|
5334
|
-
else:
|
|
5335
|
-
_instructions = self.instructions()
|
|
5502
|
+
_instructions = execute_instructions(
|
|
5503
|
+
instructions=self.instructions,
|
|
5504
|
+
agent=self,
|
|
5505
|
+
team=self,
|
|
5506
|
+
session_state=session_state,
|
|
5507
|
+
run_context=run_context,
|
|
5508
|
+
)
|
|
5336
5509
|
|
|
5337
5510
|
if isinstance(_instructions, str):
|
|
5338
5511
|
instructions.append(_instructions)
|
|
@@ -5347,7 +5520,7 @@ class Team:
|
|
|
5347
5520
|
# 1.3 Build a list of additional information for the system message
|
|
5348
5521
|
additional_information: List[str] = []
|
|
5349
5522
|
# 1.3.1 Add instructions for using markdown
|
|
5350
|
-
if self.markdown and
|
|
5523
|
+
if self.markdown and output_schema is None:
|
|
5351
5524
|
additional_information.append("Use markdown to format your answers.")
|
|
5352
5525
|
# 1.3.2 Add the current datetime
|
|
5353
5526
|
if self.add_datetime_to_context:
|
|
@@ -5558,14 +5731,18 @@ class Team:
         if add_session_state_to_context and session_state is not None:
             system_message_content += self._get_formatted_session_state_for_system_message(session_state)
 
-        # Add the JSON output prompt if output_schema is provided and
+        # Add the JSON output prompt if output_schema is provided and the model does not support native structured outputs
+        # or JSON schema outputs, or if use_json_mode is True
         if (
-
-            and self.
+            output_schema is not None
+            and self.parser_model is None
             and self.model
-            and
+            and not (
+                (self.model.supports_native_structured_outputs or self.model.supports_json_schema_outputs)
+                and not self.use_json_mode
+            )
         ):
-            system_message_content += f"{self._get_json_output_prompt()}"
+            system_message_content += f"{self._get_json_output_prompt(output_schema)}"
 
         return Message(role=self.system_message_role, content=system_message_content.strip())
 
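The widened condition above decides when to fall back to the textual JSON prompt: only when a schema was requested, no parser model will handle formatting, and the model cannot (or is not allowed to) enforce the schema itself. Restated as a standalone predicate, with flat booleans standing in for the `Team`/`Model` attributes and the `self.model` presence check elided:

```python
def should_add_json_output_prompt(
    has_output_schema: bool,
    has_parser_model: bool,
    supports_native: bool,
    supports_json_schema: bool,
    use_json_mode: bool,
) -> bool:
    return (
        has_output_schema
        and not has_parser_model
        and not ((supports_native or supports_json_schema) and not use_json_mode)
    )


# A model with native structured outputs needs no textual prompt...
assert not should_add_json_output_prompt(True, False, True, False, False)
# ...unless use_json_mode forces plain JSON prompting.
assert should_add_json_output_prompt(True, False, True, False, True)
```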
@@ -5592,6 +5769,9 @@ class Team:
         dependencies = run_context.dependencies or dependencies
         metadata = run_context.metadata or metadata
 
+        # Get output_schema from run_context
+        output_schema = run_context.output_schema if run_context else None
+
         # 1. If the system_message is provided, use that.
         if self.system_message is not None:
             if isinstance(self.system_message, Message):
@@ -5601,7 +5781,13 @@ class Team:
             if isinstance(self.system_message, str):
                 sys_message_content = self.system_message
             elif callable(self.system_message):
-                sys_message_content =
+                sys_message_content = await aexecute_system_message(
+                    system_message=self.system_message,
+                    agent=self,
+                    team=self,
+                    session_state=session_state,
+                    run_context=run_context,
+                )
             if not isinstance(sys_message_content, str):
                 raise Exception("system_message must return a string")
 
@@ -5625,15 +5811,13 @@ class Team:
         if self.instructions is not None:
             _instructions = self.instructions
             if callable(self.instructions):
-
-
-
-
-
-
-
-                else:
-                    _instructions = self.instructions()
+                _instructions = await aexecute_instructions(
+                    instructions=self.instructions,
+                    agent=self,
+                    team=self,
+                    session_state=session_state,
+                    run_context=run_context,
+                )
 
             if isinstance(_instructions, str):
                 instructions.append(_instructions)
@@ -5648,7 +5832,7 @@ class Team:
         # 1.3 Build a list of additional information for the system message
         additional_information: List[str] = []
         # 1.3.1 Add instructions for using markdown
-        if self.markdown and
+        if self.markdown and output_schema is None:
             additional_information.append("Use markdown to format your answers.")
         # 1.3.2 Add the current datetime
         if self.add_datetime_to_context:
@@ -5864,14 +6048,18 @@ class Team:
         if add_session_state_to_context and session_state is not None:
             system_message_content += self._get_formatted_session_state_for_system_message(session_state)
 
-        # Add the JSON output prompt if output_schema is provided and
+        # Add the JSON output prompt if output_schema is provided and the model does not support native structured outputs
+        # or JSON schema outputs, or if use_json_mode is True
         if (
-
-            and self.
+            output_schema is not None
+            and self.parser_model is None
             and self.model
-            and
+            and not (
+                (self.model.supports_native_structured_outputs or self.model.supports_json_schema_outputs)
+                and not self.use_json_mode
+            )
         ):
-            system_message_content += f"{self._get_json_output_prompt()}"
+            system_message_content += f"{self._get_json_output_prompt(output_schema)}"
 
         return Message(role=self.system_message_role, content=system_message_content.strip())
 
@@ -6124,7 +6312,7 @@ class Team:
 
         # 5. Add user message to run_messages (message second as per Dirk's requirement)
         # 5.1 Build user message if message is None, str or list
-        user_message = self.
+        user_message = await self._aget_user_message(
             run_response=run_response,
             run_context=run_context,
             input_message=input_message,
@@ -6298,20 +6486,181 @@ class Team:
             **kwargs,
         )
 
+    async def _aget_user_message(
+        self,
+        *,
+        run_response: TeamRunOutput,
+        run_context: RunContext,
+        input_message: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None,
+        user_id: Optional[str] = None,
+        audio: Optional[Sequence[Audio]] = None,
+        images: Optional[Sequence[Image]] = None,
+        videos: Optional[Sequence[Video]] = None,
+        files: Optional[Sequence[File]] = None,
+        add_dependencies_to_context: Optional[bool] = None,
+        **kwargs,
+    ):
+        # Get references from the knowledge base to use in the user message
+        references = None
+
+        if input_message is None:
+            # If we have any media, return a message with empty content
+            if images is not None or audio is not None or videos is not None or files is not None:
+                return Message(
+                    role="user",
+                    content="",
+                    images=None if not self.send_media_to_model else images,
+                    audio=None if not self.send_media_to_model else audio,
+                    videos=None if not self.send_media_to_model else videos,
+                    files=None if not self.send_media_to_model else files,
+                    **kwargs,
+                )
+            else:
+                # If the input is None, return None
+                return None
+
+        else:
+            if isinstance(input_message, list):
+                input_content: Union[str, list[Any], list[Message]]
+                if len(input_message) > 0 and isinstance(input_message[0], dict) and "type" in input_message[0]:
+                    # This is multimodal content (text + images/audio/video), preserve the structure
+                    input_content = input_message
+                elif len(input_message) > 0 and isinstance(input_message[0], Message):
+                    # This is a list of Message objects, extract text content from them
+                    input_content = get_text_from_message(input_message)
+                elif all(isinstance(item, str) for item in input_message):
+                    input_content = "\n".join([str(item) for item in input_message])
+                else:
+                    input_content = str(input_message)
+
+                return Message(
+                    role="user",
+                    content=input_content,
+                    images=None if not self.send_media_to_model else images,
+                    audio=None if not self.send_media_to_model else audio,
+                    videos=None if not self.send_media_to_model else videos,
+                    files=None if not self.send_media_to_model else files,
+                    **kwargs,
+                )
+
+            # If message is provided as a Message, use it directly
+            elif isinstance(input_message, Message):
+                return input_message
+            # If message is provided as a dict, try to validate it as a Message
+            elif isinstance(input_message, dict):
+                try:
+                    if self.input_schema and is_typed_dict(self.input_schema):
+                        import json
+
+                        content = json.dumps(input_message, indent=2, ensure_ascii=False)
+                        return Message(role="user", content=content)
+                    else:
+                        return Message.model_validate(input_message)
+                except Exception as e:
+                    log_warning(f"Failed to validate input: {e}")
+
+            # If message is provided as a BaseModel, convert it to a Message
+            elif isinstance(input_message, BaseModel):
+                try:
+                    # Create a user message with the BaseModel content
+                    content = input_message.model_dump_json(indent=2, exclude_none=True)
+                    return Message(role="user", content=content)
+                except Exception as e:
+                    log_warning(f"Failed to convert BaseModel to message: {e}")
+            else:
+                user_msg_content = input_message
+                if self.add_knowledge_to_context:
+                    if isinstance(input_message, str):
+                        user_msg_content = input_message
+                    elif callable(input_message):
+                        user_msg_content = input_message(agent=self)
+                    else:
+                        raise Exception("input must be a string or a callable when add_references is True")
+
+                    try:
+                        retrieval_timer = Timer()
+                        retrieval_timer.start()
+                        docs_from_knowledge = await self.aget_relevant_docs_from_knowledge(
+                            query=user_msg_content, filters=run_context.knowledge_filters, **kwargs
+                        )
+                        if docs_from_knowledge is not None:
+                            references = MessageReferences(
+                                query=user_msg_content,
+                                references=docs_from_knowledge,
+                                time=round(retrieval_timer.elapsed, 4),
+                            )
+                            # Add the references to the run_response
+                            if run_response.references is None:
+                                run_response.references = []
+                            run_response.references.append(references)
+                        retrieval_timer.stop()
+                        log_debug(f"Time to get references: {retrieval_timer.elapsed:.4f}s")
+                    except Exception as e:
+                        log_warning(f"Failed to get references: {e}")
+
+                if self.resolve_in_context:
+                    user_msg_content = self._format_message_with_state_variables(
+                        user_msg_content,
+                        user_id=user_id,
+                        session_state=run_context.session_state,
+                        dependencies=run_context.dependencies,
+                        metadata=run_context.metadata,
+                    )
+
+                # Convert to string for concatenation operations
+                user_msg_content_str = get_text_from_message(user_msg_content) if user_msg_content is not None else ""
+
+                # 4.1 Add knowledge references to user message
+                if (
+                    self.add_knowledge_to_context
+                    and references is not None
+                    and references.references is not None
+                    and len(references.references) > 0
+                ):
+                    user_msg_content_str += "\n\nUse the following references from the knowledge base if it helps:\n"
+                    user_msg_content_str += "<references>\n"
+                    user_msg_content_str += self._convert_documents_to_string(references.references) + "\n"
+                    user_msg_content_str += "</references>"
+                # 4.2 Add context to user message
+                if add_dependencies_to_context and run_context.dependencies is not None:
+                    user_msg_content_str += "\n\n<additional context>\n"
+                    user_msg_content_str += self._convert_dependencies_to_string(run_context.dependencies) + "\n"
+                    user_msg_content_str += "</additional context>"
+
+                # Use the string version for the final content
+                user_msg_content = user_msg_content_str
+
+        # Return the user message
+        return Message(
+            role="user",
+            content=user_msg_content,
+            images=None if not self.send_media_to_model else images,
+            audio=None if not self.send_media_to_model else audio,
+            videos=None if not self.send_media_to_model else videos,
+            files=None if not self.send_media_to_model else files,
+            **kwargs,
+        )
+
     def _get_messages_for_parser_model(
-        self,
+        self,
+        model_response: ModelResponse,
+        response_format: Optional[Union[Dict, Type[BaseModel]]],
+        run_context: Optional[RunContext] = None,
     ) -> List[Message]:
         from agno.utils.prompts import get_json_output_prompt
 
         """Get the messages for the parser model."""
+        # Get output_schema from run_context
+        output_schema = run_context.output_schema if run_context else None
+
         system_content = (
             self.parser_model_prompt
             if self.parser_model_prompt is not None
             else "You are tasked with creating a structured output from the provided user message."
         )
 
-        if response_format == {"type": "json_object"} and
-            system_content += f"{get_json_output_prompt(
+        if response_format == {"type": "json_object"} and output_schema is not None:
+            system_content += f"{get_json_output_prompt(output_schema)}"  # type: ignore
 
         return [
             Message(role="system", content=system_content),
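The new `_aget_user_message` mirrors the Agent-side input normalization: lists of strings are joined, Pydantic models and dicts are serialized to JSON, and plain strings pass through (with optional knowledge references appended). A condensed sketch of those rules, with `Message` simplified to a plain dict:

```python
import json
from typing import Optional

from pydantic import BaseModel


def normalize_input(input_message):
    if isinstance(input_message, list) and all(isinstance(i, str) for i in input_message):
        # Lists of strings are joined into a single text block
        return {"role": "user", "content": "\n".join(input_message)}
    if isinstance(input_message, BaseModel):
        # Pydantic models are serialized, mirroring model_dump_json(exclude_none=True)
        return {"role": "user", "content": input_message.model_dump_json(indent=2, exclude_none=True)}
    if isinstance(input_message, dict):
        # Dicts matching a TypedDict input_schema are dumped as JSON
        return {"role": "user", "content": json.dumps(input_message, indent=2, ensure_ascii=False)}
    return {"role": "user", "content": str(input_message)}


class Query(BaseModel):  # hypothetical input schema
    topic: str
    limit: Optional[int] = None


print(normalize_input(Query(topic="agents")))  # JSON content; unset limit is omitted
```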
@@ -6319,19 +6668,25 @@ class Team:
         ]
 
     def _get_messages_for_parser_model_stream(
-        self,
+        self,
+        run_response: TeamRunOutput,
+        response_format: Optional[Union[Dict, Type[BaseModel]]],
+        run_context: Optional[RunContext] = None,
     ) -> List[Message]:
         """Get the messages for the parser model."""
         from agno.utils.prompts import get_json_output_prompt
 
+        # Get output_schema from run_context
+        output_schema = run_context.output_schema if run_context else None
+
         system_content = (
             self.parser_model_prompt
             if self.parser_model_prompt is not None
             else "You are tasked with creating a structured output from the provided data."
         )
 
-        if response_format == {"type": "json_object"} and
-            system_content += f"{get_json_output_prompt(
+        if response_format == {"type": "json_object"} and output_schema is not None:
+            system_content += f"{get_json_output_prompt(output_schema)}"  # type: ignore
 
         return [
             Message(role="system", content=system_content),
@@ -6430,7 +6785,7 @@ class Team:
             log_error(f"Failed to convert sanitized context to JSON: {e}")
             return str(context)
 
-    def _get_json_output_prompt(self) -> str:
+    def _get_json_output_prompt(self, output_schema: Optional[Type[BaseModel]] = None) -> str:
         """Return the JSON output prompt for the Agent.
 
         This is added to the system prompt when the output_schema is set and structured_outputs is False.
@@ -6438,17 +6793,17 @@ class Team:
         import json
 
         json_output_prompt = "Provide your output as a JSON containing the following fields:"
-        if
-            if isinstance(
+        if output_schema is not None:
+            if isinstance(output_schema, str):
                 json_output_prompt += "\n<json_fields>"
-                json_output_prompt += f"\n{
+                json_output_prompt += f"\n{output_schema}"
                 json_output_prompt += "\n</json_fields>"
-            elif isinstance(
+            elif isinstance(output_schema, list):
                 json_output_prompt += "\n<json_fields>"
-                json_output_prompt += f"\n{json.dumps(
+                json_output_prompt += f"\n{json.dumps(output_schema)}"
                 json_output_prompt += "\n</json_fields>"
-            elif issubclass(
-                json_schema =
+            elif issubclass(output_schema, BaseModel):
+                json_schema = output_schema.model_json_schema()
                 if json_schema is not None:
                     response_model_properties = {}
                     json_schema_properties = json_schema.get("properties")
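`output_schema.model_json_schema()` is standard Pydantic v2; the prompt builder then walks the returned `properties` map to describe each field. For reference, with a hypothetical schema:

```python
from typing import Optional

from pydantic import BaseModel


class MovieScript(BaseModel):  # hypothetical example schema
    title: str
    genre: Optional[str] = None


# The same schema dict the prompt builder consumes via json_schema.get("properties")
schema = MovieScript.model_json_schema()
print(sorted(schema["properties"]))  # ['genre', 'title']
```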
@@ -6488,7 +6843,7 @@ class Team:
                     json_output_prompt += f"\n{json.dumps(response_model_properties, indent=2)}"
                     json_output_prompt += "\n</json_field_properties>"
                 else:
-                    log_warning(f"Could not build json schema for {
+                    log_warning(f"Could not build json schema for {output_schema}")
         else:
             json_output_prompt += "Provide the output as JSON."
 
@@ -6892,7 +7247,7 @@ class Team:
         if not files:
             files = []
 
-        def _setup_delegate_task_to_member(member_agent: Union[Agent, "Team"],
+        def _setup_delegate_task_to_member(member_agent: Union[Agent, "Team"], task: str):
             # 1. Initialize the member agent
             self._initialize_member(member_agent)
 
@@ -6904,8 +7259,10 @@ class Team:
             # 2. Handle respond_directly nuances
             if self.respond_directly:
                 # Since we return the response directly from the member agent, we need to set the output schema from the team down.
-
-
+                # Get output_schema from run_context
+                team_output_schema = run_context.output_schema if run_context else None
+                if not member_agent.output_schema and team_output_schema:
+                    member_agent.output_schema = team_output_schema
 
                 # If the member will produce structured output, we need to parse the response
                 if member_agent.output_schema is not None:
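With `respond_directly`, the team-level `output_schema` (now read from `run_context`) is pushed down to members that do not define their own, so the verbatim member answer still matches the requested shape. A sketch of that propagation with stand-in objects (not agno types):

```python
class _Member:  # stand-in for a member Agent
    output_schema = None


class _RunContext:  # stand-in for the run context
    output_schema = dict  # pretend the team run requested some schema


member, run_context = _Member(), _RunContext()

# Mirrors the hunk above: only fill in members without their own schema.
team_output_schema = run_context.output_schema if run_context else None
if not member.output_schema and team_output_schema:
    member.output_schema = team_output_schema

assert member.output_schema is dict
```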
@@ -6929,7 +7286,7 @@ class Team:
             if self.determine_input_for_members is False:
                 member_agent_task = input  # type: ignore
             else:
-                member_agent_task =
+                member_agent_task = task
 
             if team_history_str or team_member_interactions_str:
                 member_agent_task = format_member_agent_task(  # type: ignore
@@ -7022,9 +7379,7 @@ class Team:
                 return
 
             _, member_agent = result
-            member_agent_task, history = _setup_delegate_task_to_member(
-                member_agent=member_agent, task_description=task
-            )
+            member_agent_task, history = _setup_delegate_task_to_member(member_agent=member_agent, task=task)
 
             # Make sure for the member agent, we are using the agent logger
             use_agent_logger()
@@ -7043,7 +7398,7 @@ class Team:
                 audio=audio,
                 files=files,
                 stream=True,
-                stream_events=stream_events,
+                stream_events=stream_events or self.stream_member_events,
                 debug_mode=debug_mode,
                 dependencies=run_context.dependencies,
                 add_dependencies_to_context=add_dependencies_to_context,
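Every member delegation in this file now passes `stream_events=stream_events or self.stream_member_events`, so member-level events are forwarded when either the caller requested event streaming or the team itself was configured with `stream_member_events=True`:

```python
# Truth table of the new flag combination used in the delegation calls above.
def effective_stream_events(stream_events: bool, stream_member_events: bool) -> bool:
    return stream_events or stream_member_events


assert effective_stream_events(False, True)      # team config alone enables it
assert effective_stream_events(True, False)      # caller request alone enables it
assert not effective_stream_events(False, False)
```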
@@ -7150,9 +7505,7 @@ class Team:
                 return
 
             _, member_agent = result
-            member_agent_task, history = _setup_delegate_task_to_member(
-                member_agent=member_agent, task_description=task
-            )
+            member_agent_task, history = _setup_delegate_task_to_member(member_agent=member_agent, task=task)
 
             # Make sure for the member agent, we are using the agent logger
             use_agent_logger()
@@ -7171,7 +7524,7 @@ class Team:
                 audio=audio,
                 files=files,
                 stream=True,
-                stream_events=stream_events,
+                stream_events=stream_events or self.stream_member_events,
                 debug_mode=debug_mode,
                 dependencies=run_context.dependencies,
                 add_dependencies_to_context=add_dependencies_to_context,
@@ -7268,9 +7621,7 @@ class Team:
 
         # Run all the members sequentially
         for _, member_agent in enumerate(self.members):
-            member_agent_task, history = _setup_delegate_task_to_member(
-                member_agent=member_agent, task_description=task
-            )
+            member_agent_task, history = _setup_delegate_task_to_member(member_agent=member_agent, task=task)
 
             member_session_state_copy = copy(run_context.session_state)
             if stream:
@@ -7285,7 +7636,7 @@ class Team:
                 audio=audio,
                 files=files,
                 stream=True,
-                stream_events=stream_events,
+                stream_events=stream_events or self.stream_member_events,
                 knowledge_filters=run_context.knowledge_filters
                 if not member_agent.knowledge_filters and member_agent.knowledge
                 else None,
@@ -7384,9 +7735,7 @@ class Team:
         queue: "asyncio.Queue[Union[RunOutputEvent, TeamRunOutputEvent, str, object]]" = asyncio.Queue()
 
         async def stream_member(agent: Union[Agent, "Team"]) -> None:
-            member_agent_task, history = _setup_delegate_task_to_member(
-                member_agent=agent, task_description=task
-            )  # type: ignore
+            member_agent_task, history = _setup_delegate_task_to_member(member_agent=agent, task=task)  # type: ignore
             member_session_state_copy = copy(run_context.session_state)
 
             member_stream = agent.arun(  # type: ignore
@@ -7399,7 +7748,7 @@ class Team:
                 audio=audio,
                 files=files,
                 stream=True,
-                stream_events=stream_events,
+                stream_events=stream_events or self.stream_member_events,
                 debug_mode=debug_mode,
                 knowledge_filters=run_context.knowledge_filters
                 if not member_agent.knowledge_filters and member_agent.knowledge
@@ -7462,9 +7811,7 @@ class Team:
         tasks = []
         for member_agent_index, member_agent in enumerate(self.members):
             current_agent = member_agent
-            member_agent_task, history = _setup_delegate_task_to_member(
-                member_agent=current_agent, task_description=task
-            )
+            member_agent_task, history = _setup_delegate_task_to_member(member_agent=current_agent, task=task)
 
             async def run_member_agent(agent=current_agent) -> str:
                 member_session_state_copy = copy(run_context.session_state)
@@ -8209,8 +8556,7 @@ class Team:
 
         session = self.get_session(session_id=session_id)  # type: ignore
         if session is None:
-
-            return []
+            raise Exception("Session not found")
 
         return session.get_messages(
             team_id=self.id,