openai-agents 0.3.3__py3-none-any.whl → 0.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agents/__init__.py +12 -0
- agents/_run_impl.py +18 -6
- agents/extensions/memory/__init__.py +1 -3
- agents/extensions/memory/sqlalchemy_session.py +25 -3
- agents/extensions/models/litellm_model.py +11 -6
- agents/items.py +103 -4
- agents/mcp/server.py +43 -11
- agents/mcp/util.py +17 -1
- agents/memory/openai_conversations_session.py +2 -2
- agents/models/chatcmpl_converter.py +44 -18
- agents/models/openai_chatcompletions.py +27 -26
- agents/models/openai_responses.py +31 -29
- agents/realtime/handoffs.py +1 -1
- agents/realtime/model_inputs.py +3 -0
- agents/realtime/openai_realtime.py +38 -29
- agents/realtime/session.py +1 -1
- agents/result.py +48 -11
- agents/run.py +223 -27
- agents/stream_events.py +1 -0
- agents/strict_schema.py +14 -0
- agents/tool.py +86 -3
- agents/voice/models/openai_stt.py +2 -1
- {openai_agents-0.3.3.dist-info → openai_agents-0.4.1.dist-info}/METADATA +2 -2
- {openai_agents-0.3.3.dist-info → openai_agents-0.4.1.dist-info}/RECORD +26 -26
- {openai_agents-0.3.3.dist-info → openai_agents-0.4.1.dist-info}/WHEEL +0 -0
- {openai_agents-0.3.3.dist-info → openai_agents-0.4.1.dist-info}/licenses/LICENSE +0 -0
agents/run.py
CHANGED
@@ -13,6 +13,7 @@ from openai.types.responses import (
 from openai.types.responses.response_prompt_param import (
     ResponsePromptParam,
 )
+from openai.types.responses.response_reasoning_item import ResponseReasoningItem
 from typing_extensions import NotRequired, TypedDict, Unpack
 
 from ._run_impl import (
@@ -48,6 +49,7 @@ from .items import (
     HandoffCallItem,
     ItemHelpers,
     ModelResponse,
+    ReasoningItem,
     RunItem,
     ToolCallItem,
     ToolCallItemTypes,
@@ -122,6 +124,51 @@ class CallModelData(Generic[TContext]):
     context: TContext | None
 
 
+@dataclass
+class _ServerConversationTracker:
+    """Tracks server-side conversation state for either conversation_id or
+    previous_response_id modes."""
+
+    conversation_id: str | None = None
+    previous_response_id: str | None = None
+    sent_items: set[int] = field(default_factory=set)
+    server_items: set[int] = field(default_factory=set)
+
+    def track_server_items(self, model_response: ModelResponse) -> None:
+        for output_item in model_response.output:
+            self.server_items.add(id(output_item))
+
+        # Update previous_response_id only when using previous_response_id
+        if (
+            self.conversation_id is None
+            and self.previous_response_id is not None
+            and model_response.response_id is not None
+        ):
+            self.previous_response_id = model_response.response_id
+
+    def prepare_input(
+        self,
+        original_input: str | list[TResponseInputItem],
+        generated_items: list[RunItem],
+    ) -> list[TResponseInputItem]:
+        input_items: list[TResponseInputItem] = []
+
+        # On first call (when there are no generated items yet), include the original input
+        if not generated_items:
+            input_items.extend(ItemHelpers.input_to_new_input_list(original_input))
+
+        # Process generated_items, skip items already sent or from server
+        for item in generated_items:
+            raw_item_id = id(item.raw_item)
+
+            if raw_item_id in self.sent_items or raw_item_id in self.server_items:
+                continue
+            input_items.append(item.to_input_item())
+            self.sent_items.add(raw_item_id)
+
+        return input_items
+
+
 # Type alias for the optional input filter callback
 CallModelInputFilter = Callable[[CallModelData[Any]], MaybeAwaitable[ModelInputData]]
 
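A minimal sketch of how `_ServerConversationTracker.prepare_input` dedupes input across turns. This is illustrative only; `_StubItem` is a hypothetical stand-in for a `RunItem`, and `_ServerConversationTracker` is a private helper imported here purely for demonstration.

from dataclasses import dataclass

from agents.run import _ServerConversationTracker  # private helper, illustration only

@dataclass
class _StubItem:
    raw_item: object

    def to_input_item(self):
        return self.raw_item

tracker = _ServerConversationTracker(conversation_id="conv_123")
assert tracker.prepare_input("hi", []) != []  # first turn: original input is included
item = _StubItem(raw_item={"type": "message"})
assert tracker.prepare_input("hi", [item]) == [{"type": "message"}]  # new item sent once
assert tracker.prepare_input("hi", [item]) == []  # already sent: deduped via id(raw_item)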
@@ -470,6 +517,13 @@ class AgentRunner:
         if run_config is None:
             run_config = RunConfig()
 
+        if conversation_id is not None or previous_response_id is not None:
+            server_conversation_tracker = _ServerConversationTracker(
+                conversation_id=conversation_id, previous_response_id=previous_response_id
+            )
+        else:
+            server_conversation_tracker = None
+
         # Keep original user input separate from session-prepared input
         original_user_input = input
         prepared_input = await self._prepare_input_with_session(
@@ -563,8 +617,7 @@
                     run_config=run_config,
                     should_run_agent_start_hooks=should_run_agent_start_hooks,
                     tool_use_tracker=tool_use_tracker,
-                    previous_response_id=previous_response_id,
-                    conversation_id=conversation_id,
+                    server_conversation_tracker=server_conversation_tracker,
                 ),
             )
         else:
@@ -578,8 +631,7 @@
                 run_config=run_config,
                 should_run_agent_start_hooks=should_run_agent_start_hooks,
                 tool_use_tracker=tool_use_tracker,
-                previous_response_id=previous_response_id,
-                conversation_id=conversation_id,
+                server_conversation_tracker=server_conversation_tracker,
             )
             should_run_agent_start_hooks = False
 
@@ -587,6 +639,9 @@
             original_input = turn_result.original_input
             generated_items = turn_result.generated_items
 
+            if server_conversation_tracker is not None:
+                server_conversation_tracker.track_server_items(turn_result.model_response)
+
             # Collect tool guardrail results from this turn
             tool_input_guardrail_results.extend(turn_result.tool_input_guardrail_results)
             tool_output_guardrail_results.extend(turn_result.tool_output_guardrail_results)
@@ -610,7 +665,13 @@
                     tool_output_guardrail_results=tool_output_guardrail_results,
                     context_wrapper=context_wrapper,
                 )
-                await self._save_result_to_session(session, [], turn_result.new_step_items)
+                if not any(
+                    guardrail_result.output.tripwire_triggered
+                    for guardrail_result in input_guardrail_results
+                ):
+                    await self._save_result_to_session(
+                        session, [], turn_result.new_step_items
+                    )
 
                 return result
             elif isinstance(turn_result.next_step, NextStepHandoff):
@@ -619,7 +680,13 @@
                 current_span = None
                 should_run_agent_start_hooks = True
             elif isinstance(turn_result.next_step, NextStepRunAgain):
-                await self._save_result_to_session(session, [], turn_result.new_step_items)
+                if not any(
+                    guardrail_result.output.tripwire_triggered
+                    for guardrail_result in input_guardrail_results
+                ):
+                    await self._save_result_to_session(
+                        session, [], turn_result.new_step_items
+                    )
             else:
                 raise AgentsException(
                     f"Unknown next step type: {type(turn_result.next_step)}"
@@ -863,6 +930,13 @@
         should_run_agent_start_hooks = True
         tool_use_tracker = AgentToolUseTracker()
 
+        if conversation_id is not None or previous_response_id is not None:
+            server_conversation_tracker = _ServerConversationTracker(
+                conversation_id=conversation_id, previous_response_id=previous_response_id
+            )
+        else:
+            server_conversation_tracker = None
+
         streamed_result._event_queue.put_nowait(AgentUpdatedStreamEvent(new_agent=current_agent))
 
         try:
@@ -877,6 +951,12 @@
                 await AgentRunner._save_result_to_session(session, starting_input, [])
 
             while True:
+                # Check for soft cancel before starting new turn
+                if streamed_result._cancel_mode == "after_turn":
+                    streamed_result.is_complete = True
+                    streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
+                    break
+
                 if streamed_result.is_complete:
                     break
 
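The soft-cancel checks above pair with a new cancel mode on streamed runs (see agents/result.py in this release). A hedged usage sketch, assuming the public surface is `Runner.run_streamed` plus a `cancel("after_turn")` call on the streamed result; only the internal `_cancel_mode` checks are visible in this diff:

import asyncio

from agents import Agent, Runner

async def main() -> None:
    agent = Agent(name="assistant", instructions="Answer briefly.")
    result = Runner.run_streamed(agent, input="Write a very long story.")
    count = 0
    async for event in result.stream_events():
        count += 1
        if count >= 100:  # arbitrary cutoff for the sketch
            result.cancel("after_turn")  # finish the current turn, then stop cleanly
    # Once the turn completes, the run loop enqueues QueueCompleteSentinel and exits.

asyncio.run(main())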
@@ -938,8 +1018,7 @@
                         should_run_agent_start_hooks,
                         tool_use_tracker,
                         all_tools,
-                        previous_response_id,
-                        conversation_id,
+                        server_conversation_tracker,
                     )
                     should_run_agent_start_hooks = False
 
@@ -949,7 +1028,24 @@
                     streamed_result.input = turn_result.original_input
                     streamed_result.new_items = turn_result.generated_items
 
+                    if server_conversation_tracker is not None:
+                        server_conversation_tracker.track_server_items(turn_result.model_response)
+
                     if isinstance(turn_result.next_step, NextStepHandoff):
+                        # Save the conversation to session if enabled (before handoff)
+                        # Note: Non-streaming path doesn't save handoff turns immediately,
+                        # but streaming needs to for graceful cancellation support
+                        if session is not None:
+                            should_skip_session_save = (
+                                await AgentRunner._input_guardrail_tripwire_triggered_for_stream(
+                                    streamed_result
+                                )
+                            )
+                            if should_skip_session_save is False:
+                                await AgentRunner._save_result_to_session(
+                                    session, [], turn_result.new_step_items
+                                )
+
                         current_agent = turn_result.next_step.new_agent
                         current_span.finish(reset_current=True)
                         current_span = None
@@ -957,6 +1053,12 @@
                         streamed_result._event_queue.put_nowait(
                             AgentUpdatedStreamEvent(new_agent=current_agent)
                         )
+
+                        # Check for soft cancel after handoff
+                        if streamed_result._cancel_mode == "after_turn":  # type: ignore[comparison-overlap]
+                            streamed_result.is_complete = True
+                            streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
+                            break
                     elif isinstance(turn_result.next_step, NextStepFinalOutput):
                         streamed_result._output_guardrails_task = asyncio.create_task(
                             cls._run_output_guardrails(
@@ -979,15 +1081,35 @@
                         streamed_result.is_complete = True
 
                         # Save the conversation to session if enabled
-                        await AgentRunner._save_result_to_session(
-                            session, [], turn_result.new_step_items
-                        )
+                        if session is not None:
+                            should_skip_session_save = (
+                                await AgentRunner._input_guardrail_tripwire_triggered_for_stream(
+                                    streamed_result
+                                )
+                            )
+                            if should_skip_session_save is False:
+                                await AgentRunner._save_result_to_session(
+                                    session, [], turn_result.new_step_items
+                                )
 
                         streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
                     elif isinstance(turn_result.next_step, NextStepRunAgain):
-                        await AgentRunner._save_result_to_session(
-                            session, [], turn_result.new_step_items
-                        )
+                        if session is not None:
+                            should_skip_session_save = (
+                                await AgentRunner._input_guardrail_tripwire_triggered_for_stream(
+                                    streamed_result
+                                )
+                            )
+                            if should_skip_session_save is False:
+                                await AgentRunner._save_result_to_session(
+                                    session, [], turn_result.new_step_items
+                                )
+
+                        # Check for soft cancel after turn completion
+                        if streamed_result._cancel_mode == "after_turn":  # type: ignore[comparison-overlap]
+                            streamed_result.is_complete = True
+                            streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
+                            break
             except AgentsException as exc:
                 streamed_result.is_complete = True
                 streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
@@ -1016,6 +1138,15 @@
 
                 streamed_result.is_complete = True
             finally:
+                if streamed_result._input_guardrails_task:
+                    try:
+                        await AgentRunner._input_guardrail_tripwire_triggered_for_stream(
+                            streamed_result
+                        )
+                    except Exception as e:
+                        logger.debug(
+                            f"Error in streamed_result finalize for agent {current_agent.name} - {e}"
+                        )
                 if current_span:
                     current_span.finish(reset_current=True)
                 if streamed_result.trace:
@@ -1032,10 +1163,10 @@
         should_run_agent_start_hooks: bool,
         tool_use_tracker: AgentToolUseTracker,
         all_tools: list[Tool],
-        previous_response_id: str | None,
-        conversation_id: str | None,
+        server_conversation_tracker: _ServerConversationTracker | None = None,
     ) -> SingleStepResult:
         emitted_tool_call_ids: set[str] = set()
+        emitted_reasoning_item_ids: set[str] = set()
 
         if should_run_agent_start_hooks:
             await asyncio.gather(
@@ -1064,8 +1195,13 @@
 
         final_response: ModelResponse | None = None
 
-        input = ItemHelpers.input_to_new_input_list(streamed_result.input)
-        input.extend([item.to_input_item() for item in streamed_result.new_items])
+        if server_conversation_tracker is not None:
+            input = server_conversation_tracker.prepare_input(
+                streamed_result.input, streamed_result.new_items
+            )
+        else:
+            input = ItemHelpers.input_to_new_input_list(streamed_result.input)
+            input.extend([item.to_input_item() for item in streamed_result.new_items])
 
         # THIS IS THE RESOLVED CONFLICT BLOCK
         filtered = await cls._maybe_filter_model_input(
@@ -1088,6 +1224,15 @@
             ),
         )
 
+        previous_response_id = (
+            server_conversation_tracker.previous_response_id
+            if server_conversation_tracker
+            else None
+        )
+        conversation_id = (
+            server_conversation_tracker.conversation_id if server_conversation_tracker else None
+        )
+
         # 1. Stream the output events
         async for event in model.stream_response(
             filtered.instructions,
@@ -1103,6 +1248,9 @@
             conversation_id=conversation_id,
             prompt=prompt_config,
         ):
+            # Emit the raw event ASAP
+            streamed_result._event_queue.put_nowait(RawResponsesStreamEvent(data=event))
+
             if isinstance(event, ResponseCompletedEvent):
                 usage = (
                     Usage(
@@ -1142,7 +1290,16 @@
                                 RunItemStreamEvent(item=tool_item, name="tool_called")
                             )
 
-            streamed_result._event_queue.put_nowait(RawResponsesStreamEvent(data=event))
+                    elif isinstance(output_item, ResponseReasoningItem):
+                        reasoning_id: str | None = getattr(output_item, "id", None)
+
+                        if reasoning_id and reasoning_id not in emitted_reasoning_item_ids:
+                            emitted_reasoning_item_ids.add(reasoning_id)
+
+                            reasoning_item = ReasoningItem(raw_item=output_item, agent=agent)
+                            streamed_result._event_queue.put_nowait(
+                                RunItemStreamEvent(item=reasoning_item, name="reasoning_item_created")
+                            )
 
             # Call hook just after the model response is finalized.
             if final_response is not None:
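With the block above, reasoning output now surfaces as a `reasoning_item_created` run-item stream event, emitted at most once per reasoning id. A hedged consumer-side sketch (attribute access follows the SDK's existing run-item events):

from agents import Agent, Runner

async def print_reasoning(agent: Agent, prompt: str) -> None:
    result = Runner.run_streamed(agent, input=prompt)
    async for event in result.stream_events():
        # Reasoning items arrive alongside the existing tool_called / message events.
        if getattr(event, "name", None) == "reasoning_item_created":
            print("reasoning item:", event.item.raw_item)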
@@ -1196,6 +1353,18 @@
                 )
             ]
 
+        if emitted_reasoning_item_ids:
+            # Filter out reasoning items that were already emitted during streaming
+            items_to_filter = [
+                item
+                for item in items_to_filter
+                if not (
+                    isinstance(item, ReasoningItem)
+                    and (reasoning_id := getattr(item.raw_item, "id", None))
+                    and reasoning_id in emitted_reasoning_item_ids
+                )
+            ]
+
         # Filter out HandoffCallItem to avoid duplicates (already sent earlier)
         items_to_filter = [
             item for item in items_to_filter if not isinstance(item, HandoffCallItem)
@@ -1219,8 +1388,7 @@
         run_config: RunConfig,
         should_run_agent_start_hooks: bool,
         tool_use_tracker: AgentToolUseTracker,
-        previous_response_id: str | None,
-        conversation_id: str | None,
+        server_conversation_tracker: _ServerConversationTracker | None = None,
     ) -> SingleStepResult:
         # Ensure we run the hooks before anything else
         if should_run_agent_start_hooks:
@@ -1240,8 +1408,11 @@
 
         output_schema = cls._get_output_schema(agent)
         handoffs = await cls._get_handoffs(agent, context_wrapper)
-        input = ItemHelpers.input_to_new_input_list(original_input)
-        input.extend([generated_item.to_input_item() for generated_item in generated_items])
+        if server_conversation_tracker is not None:
+            input = server_conversation_tracker.prepare_input(original_input, generated_items)
+        else:
+            input = ItemHelpers.input_to_new_input_list(original_input)
+            input.extend([generated_item.to_input_item() for generated_item in generated_items])
 
         new_response = await cls._get_new_response(
             agent,
@@ -1254,8 +1425,7 @@
             context_wrapper,
             run_config,
             tool_use_tracker,
-            previous_response_id,
-            conversation_id,
+            server_conversation_tracker,
             prompt_config,
         )
 
@@ -1459,8 +1629,7 @@
         context_wrapper: RunContextWrapper[TContext],
         run_config: RunConfig,
         tool_use_tracker: AgentToolUseTracker,
-        previous_response_id: str | None,
-        conversation_id: str | None,
+        server_conversation_tracker: _ServerConversationTracker | None,
         prompt_config: ResponsePromptParam | None,
     ) -> ModelResponse:
         # Allow user to modify model input right before the call, if configured
@@ -1491,6 +1660,15 @@
             ),
         )
 
+        previous_response_id = (
+            server_conversation_tracker.previous_response_id
+            if server_conversation_tracker
+            else None
+        )
+        conversation_id = (
+            server_conversation_tracker.conversation_id if server_conversation_tracker else None
+        )
+
         new_response = await model.get_response(
             system_instructions=filtered.instructions,
             input=filtered.input,
@@ -1635,6 +1813,24 @@
         items_to_save = input_list + new_items_as_input
         await session.add_items(items_to_save)
 
+    @staticmethod
+    async def _input_guardrail_tripwire_triggered_for_stream(
+        streamed_result: RunResultStreaming,
+    ) -> bool:
+        """Return True if any input guardrail triggered during a streamed run."""
+
+        task = streamed_result._input_guardrails_task
+        if task is None:
+            return False
+
+        if not task.done():
+            await task
+
+        return any(
+            guardrail_result.output.tripwire_triggered
+            for guardrail_result in streamed_result.input_guardrail_results
+        )
+
 
 DEFAULT_AGENT_RUNNER = AgentRunner()
 _TOOL_CALL_TYPES: tuple[type, ...] = get_args(ToolCallItemTypes)
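`_input_guardrail_tripwire_triggered_for_stream` is what gates every streaming session write in the hunks above. A sketch of a guardrail that would trip it, using the SDK's public guardrail API (`@input_guardrail`, `GuardrailFunctionOutput`); the banned-word check is a toy condition:

from agents import Agent, GuardrailFunctionOutput, RunContextWrapper, input_guardrail

@input_guardrail
async def block_secrets(
    context: RunContextWrapper, agent: Agent, user_input
) -> GuardrailFunctionOutput:
    # A tripped wire makes the runner skip _save_result_to_session for the run.
    tripped = "password" in str(user_input).lower()
    return GuardrailFunctionOutput(output_info=None, tripwire_triggered=tripped)

agent = Agent(name="assistant", input_guardrails=[block_secrets])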
agents/stream_events.py
CHANGED
agents/strict_schema.py
CHANGED
@@ -87,6 +87,20 @@ def _ensure_strict_json_schema(
             for i, variant in enumerate(any_of)
         ]
 
+    # oneOf is not supported by OpenAI's structured outputs in nested contexts,
+    # so we convert it to anyOf which provides equivalent functionality for
+    # discriminated unions
+    one_of = json_schema.get("oneOf")
+    if is_list(one_of):
+        existing_any_of = json_schema.get("anyOf", [])
+        if not is_list(existing_any_of):
+            existing_any_of = []
+        json_schema["anyOf"] = existing_any_of + [
+            _ensure_strict_json_schema(variant, path=(*path, "oneOf", str(i)), root=root)
+            for i, variant in enumerate(one_of)
+        ]
+        json_schema.pop("oneOf")
+
     # intersections
     all_of = json_schema.get("allOf")
     if is_list(all_of):
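The effect of the new `oneOf` handling, sketched on a toy discriminated-union schema via the module's public wrapper `ensure_strict_json_schema` (the schema contents are illustrative):

from agents.strict_schema import ensure_strict_json_schema

schema = {
    "type": "object",
    "properties": {
        "shape": {
            "oneOf": [
                {"type": "object", "properties": {"kind": {"type": "string"}, "radius": {"type": "number"}}},
                {"type": "object", "properties": {"kind": {"type": "string"}, "side": {"type": "number"}}},
            ]
        }
    },
}

strict = ensure_strict_json_schema(schema)
assert "oneOf" not in strict["properties"]["shape"]  # rewritten to anyOf
assert len(strict["properties"]["shape"]["anyOf"]) == 2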
agents/tool.py
CHANGED
@@ -15,14 +15,13 @@ from openai.types.responses.response_output_item import LocalShellCall, McpAppro
 from openai.types.responses.tool_param import CodeInterpreter, ImageGeneration, Mcp
 from openai.types.responses.web_search_tool import Filters as WebSearchToolFilters
 from openai.types.responses.web_search_tool_param import UserLocation
-from pydantic import ValidationError
+from pydantic import BaseModel, TypeAdapter, ValidationError, model_validator
 from typing_extensions import Concatenate, NotRequired, ParamSpec, TypedDict
 
 from . import _debug
 from .computer import AsyncComputer, Computer
 from .exceptions import ModelBehaviorError
 from .function_schema import DocstringStyle, function_schema
-from .items import RunItem
 from .logger import logger
 from .run_context import RunContextWrapper
 from .strict_schema import ensure_strict_json_schema
@@ -34,6 +33,8 @@ from .util._types import MaybeAwaitable
 
 if TYPE_CHECKING:
     from .agent import Agent, AgentBase
+    from .items import RunItem
+
 
 ToolParams = ParamSpec("ToolParams")
 
@@ -48,6 +49,86 @@ ToolFunction = Union[
 ]
 
 
+class ToolOutputText(BaseModel):
+    """Represents a tool output that should be sent to the model as text."""
+
+    type: Literal["text"] = "text"
+    text: str
+
+
+class ToolOutputTextDict(TypedDict, total=False):
+    """TypedDict variant for text tool outputs."""
+
+    type: Literal["text"]
+    text: str
+
+
+class ToolOutputImage(BaseModel):
+    """Represents a tool output that should be sent to the model as an image.
+
+    You can provide either an `image_url` (URL or data URL) or a `file_id` for previously uploaded
+    content. The optional `detail` can control vision detail.
+    """
+
+    type: Literal["image"] = "image"
+    image_url: str | None = None
+    file_id: str | None = None
+    detail: Literal["low", "high", "auto"] | None = None
+
+    @model_validator(mode="after")
+    def check_at_least_one_required_field(self) -> ToolOutputImage:
+        """Validate that at least one of image_url or file_id is provided."""
+        if self.image_url is None and self.file_id is None:
+            raise ValueError("At least one of image_url or file_id must be provided")
+        return self
+
+
+class ToolOutputImageDict(TypedDict, total=False):
+    """TypedDict variant for image tool outputs."""
+
+    type: Literal["image"]
+    image_url: NotRequired[str]
+    file_id: NotRequired[str]
+    detail: NotRequired[Literal["low", "high", "auto"]]
+
+
+class ToolOutputFileContent(BaseModel):
+    """Represents a tool output that should be sent to the model as a file.
+
+    Provide one of `file_data` (base64), `file_url`, or `file_id`. You may also
+    provide an optional `filename` when using `file_data` to hint file name.
+    """
+
+    type: Literal["file"] = "file"
+    file_data: str | None = None
+    file_url: str | None = None
+    file_id: str | None = None
+    filename: str | None = None
+
+    @model_validator(mode="after")
+    def check_at_least_one_required_field(self) -> ToolOutputFileContent:
+        """Validate that at least one of file_data, file_url, or file_id is provided."""
+        if self.file_data is None and self.file_url is None and self.file_id is None:
+            raise ValueError("At least one of file_data, file_url, or file_id must be provided")
+        return self
+
+
+class ToolOutputFileContentDict(TypedDict, total=False):
+    """TypedDict variant for file content tool outputs."""
+
+    type: Literal["file"]
+    file_data: NotRequired[str]
+    file_url: NotRequired[str]
+    file_id: NotRequired[str]
+    filename: NotRequired[str]
+
+
+ValidToolOutputPydanticModels = Union[ToolOutputText, ToolOutputImage, ToolOutputFileContent]
+ValidToolOutputPydanticModelsTypeAdapter: TypeAdapter[ValidToolOutputPydanticModels] = TypeAdapter(
+    ValidToolOutputPydanticModels
+)
+
+
 @dataclass
 class FunctionToolResult:
     tool: FunctionTool
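With these types, a function tool can hand images or files back to the model instead of plain text. A hedged sketch using the SDK's existing `@function_tool` decorator (per the updated `FunctionTool` docstring below, structured outputs are now a valid return type; the URL is a placeholder):

from agents import Agent, function_tool
from agents.tool import ToolOutputImage, ToolOutputText

@function_tool
def render_chart(ticker: str) -> ToolOutputImage:
    """Render a price chart the model can inspect visually."""
    # ToolOutputImage validates that image_url or file_id is present.
    return ToolOutputImage(image_url=f"https://example.com/charts/{ticker}.png", detail="low")

@function_tool
def latest_price(ticker: str) -> ToolOutputText:
    """Return the latest price as plain text."""
    return ToolOutputText(text=f"{ticker}: 123.45 (placeholder)")

agent = Agent(name="analyst", tools=[render_chart, latest_price])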
@@ -81,7 +162,9 @@ class FunctionTool:
         1. The tool run context.
         2. The arguments from the LLM, as a JSON string.
 
-    You must return a string representation of the tool output, or something we can call `str()` on.
+    You must return one of the structured tool output types (e.g. ToolOutputText, ToolOutputImage,
+    ToolOutputFileContent), a string representation of the tool output, a list of them,
+    or something we can call `str()` on.
     In case of errors, you can either raise an Exception (which will cause the run to fail) or
     return a string error message (which will be sent back to the LLM).
     """
agents/voice/models/openai_stt.py
CHANGED

@@ -122,7 +122,8 @@ class OpenAISTTTranscriptionSession(StreamedTranscriptionSession):
             return
 
         if self._tracing_span:
-            if self._trace_include_sensitive_audio_data:
+            # Only encode audio if tracing is enabled AND buffer is not empty
+            if self._trace_include_sensitive_audio_data and self._turn_audio_buffer:
                 self._tracing_span.span_data.input = _audio_to_base64(self._turn_audio_buffer)
 
             self._tracing_span.span_data.input_format = "pcm"
{openai_agents-0.3.3.dist-info → openai_agents-0.4.1.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.3.3
+Version: 0.4.1
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://openai.github.io/openai-agents-python/
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -21,7 +21,7 @@ Classifier: Typing :: Typed
 Requires-Python: >=3.9
 Requires-Dist: griffe<2,>=1.5.6
 Requires-Dist: mcp<2,>=1.11.0; python_version >= '3.10'
-Requires-Dist: openai<2
+Requires-Dist: openai<3,>=2.2
 Requires-Dist: pydantic<3,>=2.10
 Requires-Dist: requests<3,>=2.0
 Requires-Dist: types-requests<3,>=2.0