openai-agents 0.3.3__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of openai-agents might be problematic.
- agents/__init__.py +12 -0
- agents/_run_impl.py +16 -6
- agents/extensions/memory/__init__.py +1 -3
- agents/extensions/memory/sqlalchemy_session.py +12 -3
- agents/extensions/models/litellm_model.py +3 -3
- agents/items.py +100 -4
- agents/mcp/server.py +43 -11
- agents/mcp/util.py +17 -1
- agents/memory/openai_conversations_session.py +2 -2
- agents/models/chatcmpl_converter.py +44 -18
- agents/models/openai_chatcompletions.py +27 -26
- agents/models/openai_responses.py +31 -29
- agents/realtime/handoffs.py +1 -1
- agents/result.py +48 -11
- agents/run.py +214 -27
- agents/strict_schema.py +14 -0
- agents/tool.py +72 -3
- {openai_agents-0.3.3.dist-info → openai_agents-0.4.0.dist-info}/METADATA +2 -2
- {openai_agents-0.3.3.dist-info → openai_agents-0.4.0.dist-info}/RECORD +21 -21
- {openai_agents-0.3.3.dist-info → openai_agents-0.4.0.dist-info}/WHEEL +0 -0
- {openai_agents-0.3.3.dist-info → openai_agents-0.4.0.dist-info}/licenses/LICENSE +0 -0
agents/run.py
CHANGED
@@ -13,6 +13,7 @@ from openai.types.responses import (
 from openai.types.responses.response_prompt_param import (
     ResponsePromptParam,
 )
+from openai.types.responses.response_reasoning_item import ResponseReasoningItem
 from typing_extensions import NotRequired, TypedDict, Unpack
 
 from ._run_impl import (
@@ -48,6 +49,7 @@ from .items import (
     HandoffCallItem,
     ItemHelpers,
     ModelResponse,
+    ReasoningItem,
     RunItem,
     ToolCallItem,
     ToolCallItemTypes,
@@ -122,6 +124,51 @@ class CallModelData(Generic[TContext]):
     context: TContext | None
 
 
+@dataclass
+class _ServerConversationTracker:
+    """Tracks server-side conversation state for either conversation_id or
+    previous_response_id modes."""
+
+    conversation_id: str | None = None
+    previous_response_id: str | None = None
+    sent_items: set[int] = field(default_factory=set)
+    server_items: set[int] = field(default_factory=set)
+
+    def track_server_items(self, model_response: ModelResponse) -> None:
+        for output_item in model_response.output:
+            self.server_items.add(id(output_item))
+
+        # Update previous_response_id only when using previous_response_id
+        if (
+            self.conversation_id is None
+            and self.previous_response_id is not None
+            and model_response.response_id is not None
+        ):
+            self.previous_response_id = model_response.response_id
+
+    def prepare_input(
+        self,
+        original_input: str | list[TResponseInputItem],
+        generated_items: list[RunItem],
+    ) -> list[TResponseInputItem]:
+        input_items: list[TResponseInputItem] = []
+
+        # On first call (when there are no generated items yet), include the original input
+        if not generated_items:
+            input_items.extend(ItemHelpers.input_to_new_input_list(original_input))
+
+        # Process generated_items, skip items already sent or from server
+        for item in generated_items:
+            raw_item_id = id(item.raw_item)
+
+            if raw_item_id in self.sent_items or raw_item_id in self.server_items:
+                continue
+            input_items.append(item.to_input_item())
+            self.sent_items.add(raw_item_id)
+
+        return input_items
+
+
 # Type alias for the optional input filter callback
 CallModelInputFilter = Callable[[CallModelData[Any]], MaybeAwaitable[ModelInputData]]
 
@@ -470,6 +517,13 @@ class AgentRunner:
         if run_config is None:
             run_config = RunConfig()
 
+        if conversation_id is not None or previous_response_id is not None:
+            server_conversation_tracker = _ServerConversationTracker(
+                conversation_id=conversation_id, previous_response_id=previous_response_id
+            )
+        else:
+            server_conversation_tracker = None
+
         # Keep original user input separate from session-prepared input
         original_user_input = input
         prepared_input = await self._prepare_input_with_session(
@@ -563,8 +617,7 @@
                             run_config=run_config,
                             should_run_agent_start_hooks=should_run_agent_start_hooks,
                             tool_use_tracker=tool_use_tracker,
-                            previous_response_id=previous_response_id,
-                            conversation_id=conversation_id,
+                            server_conversation_tracker=server_conversation_tracker,
                         ),
                     )
                 else:
@@ -578,8 +631,7 @@
                         run_config=run_config,
                         should_run_agent_start_hooks=should_run_agent_start_hooks,
                         tool_use_tracker=tool_use_tracker,
-                        previous_response_id=previous_response_id,
-                        conversation_id=conversation_id,
+                        server_conversation_tracker=server_conversation_tracker,
                     )
                 should_run_agent_start_hooks = False
 
@@ -587,6 +639,9 @@
                 original_input = turn_result.original_input
                 generated_items = turn_result.generated_items
 
+                if server_conversation_tracker is not None:
+                    server_conversation_tracker.track_server_items(turn_result.model_response)
+
                 # Collect tool guardrail results from this turn
                 tool_input_guardrail_results.extend(turn_result.tool_input_guardrail_results)
                 tool_output_guardrail_results.extend(turn_result.tool_output_guardrail_results)
@@ -610,7 +665,13 @@
                         tool_output_guardrail_results=tool_output_guardrail_results,
                         context_wrapper=context_wrapper,
                     )
-                    await self._save_result_to_session(session, [], turn_result.new_step_items)
+                    if not any(
+                        guardrail_result.output.tripwire_triggered
+                        for guardrail_result in input_guardrail_results
+                    ):
+                        await self._save_result_to_session(
+                            session, [], turn_result.new_step_items
+                        )
 
                     return result
                 elif isinstance(turn_result.next_step, NextStepHandoff):
@@ -619,7 +680,13 @@
                     current_span = None
                     should_run_agent_start_hooks = True
                 elif isinstance(turn_result.next_step, NextStepRunAgain):
-                    await self._save_result_to_session(session, [], turn_result.new_step_items)
+                    if not any(
+                        guardrail_result.output.tripwire_triggered
+                        for guardrail_result in input_guardrail_results
+                    ):
+                        await self._save_result_to_session(
+                            session, [], turn_result.new_step_items
+                        )
                 else:
                     raise AgentsException(
                         f"Unknown next step type: {type(turn_result.next_step)}"
@@ -863,6 +930,13 @@
        should_run_agent_start_hooks = True
        tool_use_tracker = AgentToolUseTracker()
 
+       if conversation_id is not None or previous_response_id is not None:
+           server_conversation_tracker = _ServerConversationTracker(
+               conversation_id=conversation_id, previous_response_id=previous_response_id
+           )
+       else:
+           server_conversation_tracker = None
+
        streamed_result._event_queue.put_nowait(AgentUpdatedStreamEvent(new_agent=current_agent))
 
        try:
@@ -877,6 +951,12 @@
                 await AgentRunner._save_result_to_session(session, starting_input, [])
 
             while True:
+                # Check for soft cancel before starting new turn
+                if streamed_result._cancel_mode == "after_turn":
+                    streamed_result.is_complete = True
+                    streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
+                    break
+
                 if streamed_result.is_complete:
                     break
 
@@ -938,8 +1018,7 @@
                        should_run_agent_start_hooks,
                        tool_use_tracker,
                        all_tools,
-                       previous_response_id,
-                       conversation_id,
+                       server_conversation_tracker,
                    )
                    should_run_agent_start_hooks = False
 
@@ -949,7 +1028,24 @@
                 streamed_result.input = turn_result.original_input
                 streamed_result.new_items = turn_result.generated_items
 
+                if server_conversation_tracker is not None:
+                    server_conversation_tracker.track_server_items(turn_result.model_response)
+
                 if isinstance(turn_result.next_step, NextStepHandoff):
+                    # Save the conversation to session if enabled (before handoff)
+                    # Note: Non-streaming path doesn't save handoff turns immediately,
+                    # but streaming needs to for graceful cancellation support
+                    if session is not None:
+                        should_skip_session_save = (
+                            await AgentRunner._input_guardrail_tripwire_triggered_for_stream(
+                                streamed_result
+                            )
+                        )
+                        if should_skip_session_save is False:
+                            await AgentRunner._save_result_to_session(
+                                session, [], turn_result.new_step_items
+                            )
+
                     current_agent = turn_result.next_step.new_agent
                     current_span.finish(reset_current=True)
                     current_span = None
@@ -957,6 +1053,12 @@
                     streamed_result._event_queue.put_nowait(
                         AgentUpdatedStreamEvent(new_agent=current_agent)
                     )
+
+                    # Check for soft cancel after handoff
+                    if streamed_result._cancel_mode == "after_turn":  # type: ignore[comparison-overlap]
+                        streamed_result.is_complete = True
+                        streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
+                        break
                 elif isinstance(turn_result.next_step, NextStepFinalOutput):
                     streamed_result._output_guardrails_task = asyncio.create_task(
                         cls._run_output_guardrails(
@@ -979,15 +1081,35 @@
                     streamed_result.is_complete = True
 
                     # Save the conversation to session if enabled
-                    await AgentRunner._save_result_to_session(
-                        session, [], turn_result.new_step_items
-                    )
+                    if session is not None:
+                        should_skip_session_save = (
+                            await AgentRunner._input_guardrail_tripwire_triggered_for_stream(
+                                streamed_result
+                            )
+                        )
+                        if should_skip_session_save is False:
+                            await AgentRunner._save_result_to_session(
+                                session, [], turn_result.new_step_items
+                            )
 
                     streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
                 elif isinstance(turn_result.next_step, NextStepRunAgain):
-                    await AgentRunner._save_result_to_session(
-                        session, [], turn_result.new_step_items
-                    )
+                    if session is not None:
+                        should_skip_session_save = (
+                            await AgentRunner._input_guardrail_tripwire_triggered_for_stream(
+                                streamed_result
+                            )
+                        )
+                        if should_skip_session_save is False:
+                            await AgentRunner._save_result_to_session(
+                                session, [], turn_result.new_step_items
+                            )
+
+                    # Check for soft cancel after turn completion
+                    if streamed_result._cancel_mode == "after_turn":  # type: ignore[comparison-overlap]
+                        streamed_result.is_complete = True
+                        streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
+                        break
             except AgentsException as exc:
                 streamed_result.is_complete = True
                 streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
@@ -1032,10 +1154,10 @@
         should_run_agent_start_hooks: bool,
         tool_use_tracker: AgentToolUseTracker,
         all_tools: list[Tool],
-        previous_response_id: str | None,
-        conversation_id: str | None,
+        server_conversation_tracker: _ServerConversationTracker | None = None,
     ) -> SingleStepResult:
         emitted_tool_call_ids: set[str] = set()
+        emitted_reasoning_item_ids: set[str] = set()
 
         if should_run_agent_start_hooks:
             await asyncio.gather(
@@ -1064,8 +1186,13 @@
 
         final_response: ModelResponse | None = None
 
-        input = ItemHelpers.input_to_new_input_list(streamed_result.input)
-        input.extend([item.to_input_item() for item in streamed_result.new_items])
+        if server_conversation_tracker is not None:
+            input = server_conversation_tracker.prepare_input(
+                streamed_result.input, streamed_result.new_items
+            )
+        else:
+            input = ItemHelpers.input_to_new_input_list(streamed_result.input)
+            input.extend([item.to_input_item() for item in streamed_result.new_items])
 
         # THIS IS THE RESOLVED CONFLICT BLOCK
         filtered = await cls._maybe_filter_model_input(
@@ -1088,6 +1215,15 @@
             ),
         )
 
+        previous_response_id = (
+            server_conversation_tracker.previous_response_id
+            if server_conversation_tracker
+            else None
+        )
+        conversation_id = (
+            server_conversation_tracker.conversation_id if server_conversation_tracker else None
+        )
+
         # 1. Stream the output events
         async for event in model.stream_response(
             filtered.instructions,
@@ -1103,6 +1239,9 @@
             conversation_id=conversation_id,
             prompt=prompt_config,
         ):
+            # Emit the raw event ASAP
+            streamed_result._event_queue.put_nowait(RawResponsesStreamEvent(data=event))
+
             if isinstance(event, ResponseCompletedEvent):
                 usage = (
                     Usage(
@@ -1142,7 +1281,16 @@
                             RunItemStreamEvent(item=tool_item, name="tool_called")
                         )
 
-            streamed_result._event_queue.put_nowait(RawResponsesStreamEvent(data=event))
+                    elif isinstance(output_item, ResponseReasoningItem):
+                        reasoning_id: str | None = getattr(output_item, "id", None)
+
+                        if reasoning_id and reasoning_id not in emitted_reasoning_item_ids:
+                            emitted_reasoning_item_ids.add(reasoning_id)
+
+                            reasoning_item = ReasoningItem(raw_item=output_item, agent=agent)
+                            streamed_result._event_queue.put_nowait(
+                                RunItemStreamEvent(item=reasoning_item, name="reasoning_item_created")
+                            )
 
             # Call hook just after the model response is finalized.
             if final_response is not None:
@@ -1196,6 +1344,18 @@
                 )
             ]
 
+            if emitted_reasoning_item_ids:
+                # Filter out reasoning items that were already emitted during streaming
+                items_to_filter = [
+                    item
+                    for item in items_to_filter
+                    if not (
+                        isinstance(item, ReasoningItem)
+                        and (reasoning_id := getattr(item.raw_item, "id", None))
+                        and reasoning_id in emitted_reasoning_item_ids
+                    )
+                ]
+
             # Filter out HandoffCallItem to avoid duplicates (already sent earlier)
             items_to_filter = [
                 item for item in items_to_filter if not isinstance(item, HandoffCallItem)
@@ -1219,8 +1379,7 @@
         run_config: RunConfig,
         should_run_agent_start_hooks: bool,
         tool_use_tracker: AgentToolUseTracker,
-        previous_response_id: str | None,
-        conversation_id: str | None,
+        server_conversation_tracker: _ServerConversationTracker | None = None,
    ) -> SingleStepResult:
        # Ensure we run the hooks before anything else
        if should_run_agent_start_hooks:
@@ -1240,8 +1399,11 @@
 
        output_schema = cls._get_output_schema(agent)
        handoffs = await cls._get_handoffs(agent, context_wrapper)
-       input = ItemHelpers.input_to_new_input_list(original_input)
-       input.extend([generated_item.to_input_item() for generated_item in generated_items])
+       if server_conversation_tracker is not None:
+           input = server_conversation_tracker.prepare_input(original_input, generated_items)
+       else:
+           input = ItemHelpers.input_to_new_input_list(original_input)
+           input.extend([generated_item.to_input_item() for generated_item in generated_items])
 
        new_response = await cls._get_new_response(
            agent,
@@ -1254,8 +1416,7 @@
            context_wrapper,
            run_config,
            tool_use_tracker,
-           previous_response_id,
-           conversation_id,
+           server_conversation_tracker,
            prompt_config,
        )
 
@@ -1459,8 +1620,7 @@
        context_wrapper: RunContextWrapper[TContext],
        run_config: RunConfig,
        tool_use_tracker: AgentToolUseTracker,
-       previous_response_id: str | None,
-       conversation_id: str | None,
+       server_conversation_tracker: _ServerConversationTracker | None,
        prompt_config: ResponsePromptParam | None,
    ) -> ModelResponse:
        # Allow user to modify model input right before the call, if configured
@@ -1491,6 +1651,15 @@
            ),
        )
 
+       previous_response_id = (
+           server_conversation_tracker.previous_response_id
+           if server_conversation_tracker
+           else None
+       )
+       conversation_id = (
+           server_conversation_tracker.conversation_id if server_conversation_tracker else None
+       )
+
        new_response = await model.get_response(
            system_instructions=filtered.instructions,
            input=filtered.input,
@@ -1635,6 +1804,24 @@
        items_to_save = input_list + new_items_as_input
        await session.add_items(items_to_save)
 
+   @staticmethod
+   async def _input_guardrail_tripwire_triggered_for_stream(
+       streamed_result: RunResultStreaming,
+   ) -> bool:
+       """Return True if any input guardrail triggered during a streamed run."""
+
+       task = streamed_result._input_guardrails_task
+       if task is None:
+           return False
+
+       if not task.done():
+           await task
+
+       return any(
+           guardrail_result.output.tripwire_triggered
+           for guardrail_result in streamed_result.input_guardrail_results
+       )
+
 
 DEFAULT_AGENT_RUNNER = AgentRunner()
 _TOOL_CALL_TYPES: tuple[type, ...] = get_args(ToolCallItemTypes)
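
The run.py changes center on `_ServerConversationTracker`: when a run is given a `conversation_id` or `previous_response_id`, `prepare_input` sends only the items the server has not yet seen, and `track_server_items` advances `previous_response_id` as responses arrive. A minimal usage sketch of the public path this plumbing serves (hedged: it assumes `Runner.run` accepts the `previous_response_id` keyword and `RunResult` exposes `last_response_id`, as in the SDK's documented API; the agent name and prompts are illustrative):

import asyncio

from agents import Agent, Runner


async def main() -> None:
    agent = Agent(name="Assistant", instructions="Answer concisely.")

    # First turn: a normal run; the Responses API stores the response server-side.
    first = await Runner.run(agent, "What is the capital of France?")

    # Second turn: chain off the stored response instead of resending the whole
    # transcript. Internally, _ServerConversationTracker.prepare_input() skips
    # items the server already holds.
    second = await Runner.run(
        agent,
        "And roughly how many people live there?",
        previous_response_id=first.last_response_id,
    )
    print(second.final_output)


asyncio.run(main())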
agents/strict_schema.py
CHANGED
@@ -87,6 +87,20 @@ def _ensure_strict_json_schema(
             for i, variant in enumerate(any_of)
         ]
 
+    # oneOf is not supported by OpenAI's structured outputs in nested contexts,
+    # so we convert it to anyOf which provides equivalent functionality for
+    # discriminated unions
+    one_of = json_schema.get("oneOf")
+    if is_list(one_of):
+        existing_any_of = json_schema.get("anyOf", [])
+        if not is_list(existing_any_of):
+            existing_any_of = []
+        json_schema["anyOf"] = existing_any_of + [
+            _ensure_strict_json_schema(variant, path=(*path, "oneOf", str(i)), root=root)
+            for i, variant in enumerate(one_of)
+        ]
+        json_schema.pop("oneOf")
+
     # intersections
     all_of = json_schema.get("allOf")
     if is_list(all_of):
agents/tool.py
CHANGED
@@ -15,14 +15,13 @@ from openai.types.responses.response_output_item import LocalShellCall, McpApprovalRequest
 from openai.types.responses.tool_param import CodeInterpreter, ImageGeneration, Mcp
 from openai.types.responses.web_search_tool import Filters as WebSearchToolFilters
 from openai.types.responses.web_search_tool_param import UserLocation
-from pydantic import ValidationError
+from pydantic import BaseModel, TypeAdapter, ValidationError
 from typing_extensions import Concatenate, NotRequired, ParamSpec, TypedDict
 
 from . import _debug
 from .computer import AsyncComputer, Computer
 from .exceptions import ModelBehaviorError
 from .function_schema import DocstringStyle, function_schema
-from .items import RunItem
 from .logger import logger
 from .run_context import RunContextWrapper
 from .strict_schema import ensure_strict_json_schema
@@ -34,6 +33,8 @@ from .util._types import MaybeAwaitable
 
 if TYPE_CHECKING:
     from .agent import Agent, AgentBase
+    from .items import RunItem
+
 
 ToolParams = ParamSpec("ToolParams")
 
@@ -48,6 +49,72 @@ ToolFunction = Union[
 ]
 
 
+class ToolOutputText(BaseModel):
+    """Represents a tool output that should be sent to the model as text."""
+
+    type: Literal["text"] = "text"
+    text: str
+
+
+class ToolOutputTextDict(TypedDict, total=False):
+    """TypedDict variant for text tool outputs."""
+
+    type: Literal["text"]
+    text: str
+
+
+class ToolOutputImage(BaseModel):
+    """Represents a tool output that should be sent to the model as an image.
+
+    You can provide either an `image_url` (URL or data URL) or a `file_id` for previously uploaded
+    content. The optional `detail` can control vision detail.
+    """
+
+    type: Literal["image"] = "image"
+    image_url: str | None = None
+    file_id: str | None = None
+    detail: Literal["low", "high", "auto"] | None = None
+
+
+class ToolOutputImageDict(TypedDict, total=False):
+    """TypedDict variant for image tool outputs."""
+
+    type: Literal["image"]
+    image_url: NotRequired[str]
+    file_id: NotRequired[str]
+    detail: NotRequired[Literal["low", "high", "auto"]]
+
+
+class ToolOutputFileContent(BaseModel):
+    """Represents a tool output that should be sent to the model as a file.
+
+    Provide one of `file_data` (base64), `file_url`, or `file_id`. You may also
+    provide an optional `filename` when using `file_data` to hint file name.
+    """
+
+    type: Literal["file"] = "file"
+    file_data: str | None = None
+    file_url: str | None = None
+    file_id: str | None = None
+    filename: str | None = None
+
+
+class ToolOutputFileContentDict(TypedDict, total=False):
+    """TypedDict variant for file content tool outputs."""
+
+    type: Literal["file"]
+    file_data: NotRequired[str]
+    file_url: NotRequired[str]
+    file_id: NotRequired[str]
+    filename: NotRequired[str]
+
+
+ValidToolOutputPydanticModels = Union[ToolOutputText, ToolOutputImage, ToolOutputFileContent]
+ValidToolOutputPydanticModelsTypeAdapter: TypeAdapter[ValidToolOutputPydanticModels] = TypeAdapter(
+    ValidToolOutputPydanticModels
+)
+
+
 @dataclass
 class FunctionToolResult:
     tool: FunctionTool
@@ -81,7 +148,9 @@ class FunctionTool:
     1. The tool run context.
     2. The arguments from the LLM, as a JSON string.
 
-    You must return a string representation of the tool output, or something we can call `str()` on.
+    You must return one of the structured tool output types (e.g. ToolOutputText, ToolOutputImage,
+    ToolOutputFileContent) or a string representation of the tool output, or a list of them,
+    or something we can call `str()` on.
     In case of errors, you can either raise an Exception (which will cause the run to fail) or
     return a string error message (which will be sent back to the LLM).
    """
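
With these types exported, a function tool can return structured content instead of a plain string. A hedged sketch (it assumes `function_tool` accepts these return annotations, as the updated `FunctionTool` docstring describes; the URL and tool bodies are made up):

from agents import function_tool
from agents.tool import ToolOutputImage, ToolOutputText


@function_tool
def render_chart(ticker: str) -> ToolOutputImage:
    # The model receives this as image content rather than a str()-ed blob.
    return ToolOutputImage(image_url=f"https://example.com/charts/{ticker}.png")


@function_tool
def read_note(note_id: str) -> ToolOutputText:
    # Equivalent to returning a plain string, but explicit about the payload type.
    return ToolOutputText(text=f"Note {note_id}: remember to water the plants.")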
{openai_agents-0.3.3.dist-info → openai_agents-0.4.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.3.3
+Version: 0.4.0
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://openai.github.io/openai-agents-python/
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -21,7 +21,7 @@ Classifier: Typing :: Typed
 Requires-Python: >=3.9
 Requires-Dist: griffe<2,>=1.5.6
 Requires-Dist: mcp<2,>=1.11.0; python_version >= '3.10'
-Requires-Dist: openai<2
+Requires-Dist: openai<3,>=2.2
 Requires-Dist: pydantic<3,>=2.10
 Requires-Dist: requests<3,>=2.0
 Requires-Dist: types-requests<3,>=2.0