openai-agents 0.3.2__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff compares the contents of publicly available package versions as released to their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of openai-agents may warrant closer review.

agents/result.py CHANGED
@@ -4,7 +4,7 @@ import abc
  import asyncio
  from collections.abc import AsyncIterator
  from dataclasses import dataclass, field
- from typing import TYPE_CHECKING, Any, cast
+ from typing import TYPE_CHECKING, Any, Literal, cast

  from typing_extensions import TypeVar

@@ -31,6 +31,7 @@ from .util._pretty_print import (
  if TYPE_CHECKING:
      from ._run_impl import QueueCompleteSentinel
      from .agent import Agent
+     from .tool_guardrails import ToolInputGuardrailResult, ToolOutputGuardrailResult

  T = TypeVar("T")

@@ -59,6 +60,12 @@ class RunResultBase(abc.ABC):
      output_guardrail_results: list[OutputGuardrailResult]
      """Guardrail results for the final output of the agent."""

+     tool_input_guardrail_results: list[ToolInputGuardrailResult]
+     """Tool input guardrail results from all tools executed during the run."""
+
+     tool_output_guardrail_results: list[ToolOutputGuardrailResult]
+     """Tool output guardrail results from all tools executed during the run."""
+
      context_wrapper: RunContextWrapper[Any]
      """The context wrapper for the agent run."""

@@ -157,6 +164,9 @@ class RunResultStreaming(RunResultBase):
      _output_guardrails_task: asyncio.Task[Any] | None = field(default=None, repr=False)
      _stored_exception: Exception | None = field(default=None, repr=False)

+     # Soft cancel state
+     _cancel_mode: Literal["none", "immediate", "after_turn"] = field(default="none", repr=False)
+
      @property
      def last_agent(self) -> Agent[Any]:
          """The last agent that was run. Updates as the agent run progresses, so the true last agent
@@ -164,17 +174,51 @@
          """
          return self.current_agent

-     def cancel(self) -> None:
-         """Cancels the streaming run, stopping all background tasks and marking the run as
-         complete."""
-         self._cleanup_tasks()  # Cancel all running tasks
-         self.is_complete = True  # Mark the run as complete to stop event streaming
+     def cancel(self, mode: Literal["immediate", "after_turn"] = "immediate") -> None:
+         """Cancel the streaming run.

-         # Optionally, clear the event queue to prevent processing stale events
-         while not self._event_queue.empty():
-             self._event_queue.get_nowait()
-         while not self._input_guardrail_queue.empty():
-             self._input_guardrail_queue.get_nowait()
+         Args:
+             mode: Cancellation strategy:
+                 - "immediate": Stop immediately, cancel all tasks, clear queues (default)
+                 - "after_turn": Complete current turn gracefully before stopping
+                     * Allows LLM response to finish
+                     * Executes pending tool calls
+                     * Saves session state properly
+                     * Tracks usage accurately
+                     * Stops before next turn begins
+
+         Example:
+             ```python
+             result = Runner.run_streamed(agent, "Task", session=session)
+
+             async for event in result.stream_events():
+                 if user_interrupted():
+                     result.cancel(mode="after_turn")  # Graceful
+                     # result.cancel()  # Immediate (default)
+             ```
+
+         Note: After calling cancel(), you should continue consuming stream_events()
+         to allow the cancellation to complete properly.
+         """
+         # Store the cancel mode for the background task to check
+         self._cancel_mode = mode
+
+         if mode == "immediate":
+             # Existing behavior - immediate shutdown
+             self._cleanup_tasks()  # Cancel all running tasks
+             self.is_complete = True  # Mark the run as complete to stop event streaming
+
+             # Optionally, clear the event queue to prevent processing stale events
+             while not self._event_queue.empty():
+                 self._event_queue.get_nowait()
+             while not self._input_guardrail_queue.empty():
+                 self._input_guardrail_queue.get_nowait()
+
+         elif mode == "after_turn":
+             # Soft cancel - just set the flag
+             # The streaming loop will check this and stop gracefully
+             # Don't call _cleanup_tasks() or clear queues yet
+             pass

      async def stream_events(self) -> AsyncIterator[StreamEvent]:
          """Stream deltas for new items as they are generated. We're using the types from the
agents/run.py CHANGED
@@ -13,6 +13,7 @@ from openai.types.responses (
  from openai.types.responses.response_prompt_param import (
      ResponsePromptParam,
  )
+ from openai.types.responses.response_reasoning_item import ResponseReasoningItem
  from typing_extensions import NotRequired, TypedDict, Unpack

  from ._run_impl import (
@@ -48,6 +49,7 @@ from .items import (
      HandoffCallItem,
      ItemHelpers,
      ModelResponse,
+     ReasoningItem,
      RunItem,
      ToolCallItem,
      ToolCallItemTypes,
@@ -68,6 +70,7 @@ from .stream_events import (
      StreamEvent,
  )
  from .tool import Tool
+ from .tool_guardrails import ToolInputGuardrailResult, ToolOutputGuardrailResult
  from .tracing import Span, SpanError, agent_span, get_current_trace, trace
  from .tracing.span_data import AgentSpanData
  from .usage import Usage
@@ -121,6 +124,51 @@ class CallModelData(Generic[TContext]):
      context: TContext | None


+ @dataclass
+ class _ServerConversationTracker:
+     """Tracks server-side conversation state for either conversation_id or
+     previous_response_id modes."""
+
+     conversation_id: str | None = None
+     previous_response_id: str | None = None
+     sent_items: set[int] = field(default_factory=set)
+     server_items: set[int] = field(default_factory=set)
+
+     def track_server_items(self, model_response: ModelResponse) -> None:
+         for output_item in model_response.output:
+             self.server_items.add(id(output_item))
+
+         # Update previous_response_id only when using previous_response_id
+         if (
+             self.conversation_id is None
+             and self.previous_response_id is not None
+             and model_response.response_id is not None
+         ):
+             self.previous_response_id = model_response.response_id
+
+     def prepare_input(
+         self,
+         original_input: str | list[TResponseInputItem],
+         generated_items: list[RunItem],
+     ) -> list[TResponseInputItem]:
+         input_items: list[TResponseInputItem] = []
+
+         # On first call (when there are no generated items yet), include the original input
+         if not generated_items:
+             input_items.extend(ItemHelpers.input_to_new_input_list(original_input))
+
+         # Process generated_items, skip items already sent or from server
+         for item in generated_items:
+             raw_item_id = id(item.raw_item)
+
+             if raw_item_id in self.sent_items or raw_item_id in self.server_items:
+                 continue
+             input_items.append(item.to_input_item())
+             self.sent_items.add(raw_item_id)
+
+         return input_items
+
+
  # Type alias for the optional input filter callback
  CallModelInputFilter = Callable[[CallModelData[Any]], MaybeAwaitable[ModelInputData]]

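The dedupe in `prepare_input` rests on object identity: each raw item's `id()` is recorded either when it arrives from the server (`track_server_items`) or the first time it is sent, so nothing is transmitted twice. A standalone sketch of that bookkeeping with toy objects (not the SDK's real item types):

```python
from dataclasses import dataclass, field
from typing import Any


@dataclass
class ToyTracker:
    # Same identity-based bookkeeping as _ServerConversationTracker above.
    sent_items: set[int] = field(default_factory=set)
    server_items: set[int] = field(default_factory=set)

    def track_server_items(self, outputs: list[Any]) -> None:
        for item in outputs:
            self.server_items.add(id(item))

    def prepare_input(self, generated: list[Any]) -> list[Any]:
        to_send = []
        for item in generated:
            if id(item) in self.sent_items or id(item) in self.server_items:
                continue  # already sent, or echoed back by the server
            to_send.append(item)
            self.sent_items.add(id(item))
        return to_send


tracker = ToyTracker()
local, echoed = object(), object()
tracker.track_server_items([echoed])
print(len(tracker.prepare_input([local, echoed])))  # 1: only the local item goes out
print(len(tracker.prepare_input([local, echoed])))  # 0: the local item was already sent
```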
@@ -469,6 +517,13 @@
          if run_config is None:
              run_config = RunConfig()

+         if conversation_id is not None or previous_response_id is not None:
+             server_conversation_tracker = _ServerConversationTracker(
+                 conversation_id=conversation_id, previous_response_id=previous_response_id
+             )
+         else:
+             server_conversation_tracker = None
+
          # Keep original user input separate from session-prepared input
          original_user_input = input
          prepared_input = await self._prepare_input_with_session(
@@ -494,6 +549,8 @@
          )

          input_guardrail_results: list[InputGuardrailResult] = []
+         tool_input_guardrail_results: list[ToolInputGuardrailResult] = []
+         tool_output_guardrail_results: list[ToolOutputGuardrailResult] = []

          current_span: Span[AgentSpanData] | None = None
          current_agent = starting_agent
@@ -560,8 +617,7 @@
                              run_config=run_config,
                              should_run_agent_start_hooks=should_run_agent_start_hooks,
                              tool_use_tracker=tool_use_tracker,
-                             previous_response_id=previous_response_id,
-                             conversation_id=conversation_id,
+                             server_conversation_tracker=server_conversation_tracker,
                          ),
                      )
                  else:
@@ -575,8 +631,7 @@
                          run_config=run_config,
                          should_run_agent_start_hooks=should_run_agent_start_hooks,
                          tool_use_tracker=tool_use_tracker,
-                         previous_response_id=previous_response_id,
-                         conversation_id=conversation_id,
+                         server_conversation_tracker=server_conversation_tracker,
                      )
                  should_run_agent_start_hooks = False

@@ -584,6 +639,13 @@
                  original_input = turn_result.original_input
                  generated_items = turn_result.generated_items

+                 if server_conversation_tracker is not None:
+                     server_conversation_tracker.track_server_items(turn_result.model_response)
+
+                 # Collect tool guardrail results from this turn
+                 tool_input_guardrail_results.extend(turn_result.tool_input_guardrail_results)
+                 tool_output_guardrail_results.extend(turn_result.tool_output_guardrail_results)
+
                  if isinstance(turn_result.next_step, NextStepFinalOutput):
                      output_guardrail_results = await self._run_output_guardrails(
                          current_agent.output_guardrails + (run_config.output_guardrails or []),
@@ -599,9 +661,17 @@
                          _last_agent=current_agent,
                          input_guardrail_results=input_guardrail_results,
                          output_guardrail_results=output_guardrail_results,
+                         tool_input_guardrail_results=tool_input_guardrail_results,
+                         tool_output_guardrail_results=tool_output_guardrail_results,
                          context_wrapper=context_wrapper,
                      )
-                     await self._save_result_to_session(session, [], turn_result.new_step_items)
+                     if not any(
+                         guardrail_result.output.tripwire_triggered
+                         for guardrail_result in input_guardrail_results
+                     ):
+                         await self._save_result_to_session(
+                             session, [], turn_result.new_step_items
+                         )

                      return result
                  elif isinstance(turn_result.next_step, NextStepHandoff):
@@ -610,7 +680,13 @@
                      current_span = None
                      should_run_agent_start_hooks = True
                  elif isinstance(turn_result.next_step, NextStepRunAgain):
-                     await self._save_result_to_session(session, [], turn_result.new_step_items)
+                     if not any(
+                         guardrail_result.output.tripwire_triggered
+                         for guardrail_result in input_guardrail_results
+                     ):
+                         await self._save_result_to_session(
+                             session, [], turn_result.new_step_items
+                         )
                  else:
                      raise AgentsException(
                          f"Unknown next step type: {type(turn_result.next_step)}"
@@ -706,6 +782,8 @@
              max_turns=max_turns,
              input_guardrail_results=[],
              output_guardrail_results=[],
+             tool_input_guardrail_results=[],
+             tool_output_guardrail_results=[],
              _current_agent_output_schema=output_schema,
              trace=new_trace,
              context_wrapper=context_wrapper,
@@ -852,6 +930,13 @@
          should_run_agent_start_hooks = True
          tool_use_tracker = AgentToolUseTracker()

+         if conversation_id is not None or previous_response_id is not None:
+             server_conversation_tracker = _ServerConversationTracker(
+                 conversation_id=conversation_id, previous_response_id=previous_response_id
+             )
+         else:
+             server_conversation_tracker = None
+
          streamed_result._event_queue.put_nowait(AgentUpdatedStreamEvent(new_agent=current_agent))

          try:
@@ -866,6 +951,12 @@
                  await AgentRunner._save_result_to_session(session, starting_input, [])

              while True:
+                 # Check for soft cancel before starting new turn
+                 if streamed_result._cancel_mode == "after_turn":
+                     streamed_result.is_complete = True
+                     streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
+                     break
+
                  if streamed_result.is_complete:
                      break

@@ -927,8 +1018,7 @@
                      should_run_agent_start_hooks,
                      tool_use_tracker,
                      all_tools,
-                     previous_response_id,
-                     conversation_id,
+                     server_conversation_tracker,
                  )
                  should_run_agent_start_hooks = False

@@ -938,7 +1028,24 @@
                  streamed_result.input = turn_result.original_input
                  streamed_result.new_items = turn_result.generated_items

+                 if server_conversation_tracker is not None:
+                     server_conversation_tracker.track_server_items(turn_result.model_response)
+
                  if isinstance(turn_result.next_step, NextStepHandoff):
+                     # Save the conversation to session if enabled (before handoff)
+                     # Note: Non-streaming path doesn't save handoff turns immediately,
+                     # but streaming needs to for graceful cancellation support
+                     if session is not None:
+                         should_skip_session_save = (
+                             await AgentRunner._input_guardrail_tripwire_triggered_for_stream(
+                                 streamed_result
+                             )
+                         )
+                         if should_skip_session_save is False:
+                             await AgentRunner._save_result_to_session(
+                                 session, [], turn_result.new_step_items
+                             )
+
                      current_agent = turn_result.next_step.new_agent
                      current_span.finish(reset_current=True)
                      current_span = None
@@ -946,6 +1053,12 @@
                      streamed_result._event_queue.put_nowait(
                          AgentUpdatedStreamEvent(new_agent=current_agent)
                      )
+
+                     # Check for soft cancel after handoff
+                     if streamed_result._cancel_mode == "after_turn":  # type: ignore[comparison-overlap]
+                         streamed_result.is_complete = True
+                         streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
+                         break
                  elif isinstance(turn_result.next_step, NextStepFinalOutput):
                      streamed_result._output_guardrails_task = asyncio.create_task(
                          cls._run_output_guardrails(
@@ -968,15 +1081,35 @@
                      streamed_result.is_complete = True

                      # Save the conversation to session if enabled
-                     await AgentRunner._save_result_to_session(
-                         session, [], turn_result.new_step_items
-                     )
+                     if session is not None:
+                         should_skip_session_save = (
+                             await AgentRunner._input_guardrail_tripwire_triggered_for_stream(
+                                 streamed_result
+                             )
+                         )
+                         if should_skip_session_save is False:
+                             await AgentRunner._save_result_to_session(
+                                 session, [], turn_result.new_step_items
+                             )

                      streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
                  elif isinstance(turn_result.next_step, NextStepRunAgain):
-                     await AgentRunner._save_result_to_session(
-                         session, [], turn_result.new_step_items
-                     )
+                     if session is not None:
+                         should_skip_session_save = (
+                             await AgentRunner._input_guardrail_tripwire_triggered_for_stream(
+                                 streamed_result
+                             )
+                         )
+                         if should_skip_session_save is False:
+                             await AgentRunner._save_result_to_session(
+                                 session, [], turn_result.new_step_items
+                             )
+
+                     # Check for soft cancel after turn completion
+                     if streamed_result._cancel_mode == "after_turn":  # type: ignore[comparison-overlap]
+                         streamed_result.is_complete = True
+                         streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
+                         break
              except AgentsException as exc:
                  streamed_result.is_complete = True
                  streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
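
Every session save in the streaming hunks above is gated by the same pattern: let the background input-guardrail task finish, then skip persistence if any tripwire fired. A condensed sketch of that pattern with toy types (the real code operates on `RunResultStreaming` and a `Session`):

```python
import asyncio
from dataclasses import dataclass, field


@dataclass
class ToyStream:
    guardrails_task: asyncio.Task | None = None
    tripwires: list[bool] = field(default_factory=list)  # True = tripwire fired


async def tripwire_triggered(stream: ToyStream) -> bool:
    # Mirrors _input_guardrail_tripwire_triggered_for_stream: await the
    # possibly unfinished guardrail task, then inspect its results.
    if stream.guardrails_task is None:
        return False
    if not stream.guardrails_task.done():
        await stream.guardrails_task
    return any(stream.tripwires)


async def save_turn(stream: ToyStream, items: list) -> None:
    if await tripwire_triggered(stream):
        print("guardrail tripped; turn not persisted")
        return
    print(f"saving {len(items)} items")  # stand-in for session.add_items(...)
```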
@@ -1021,10 +1154,10 @@
          should_run_agent_start_hooks: bool,
          tool_use_tracker: AgentToolUseTracker,
          all_tools: list[Tool],
-         previous_response_id: str | None,
-         conversation_id: str | None,
+         server_conversation_tracker: _ServerConversationTracker | None = None,
      ) -> SingleStepResult:
          emitted_tool_call_ids: set[str] = set()
+         emitted_reasoning_item_ids: set[str] = set()

          if should_run_agent_start_hooks:
              await asyncio.gather(
@@ -1053,8 +1186,13 @@

          final_response: ModelResponse | None = None

-         input = ItemHelpers.input_to_new_input_list(streamed_result.input)
-         input.extend([item.to_input_item() for item in streamed_result.new_items])
+         if server_conversation_tracker is not None:
+             input = server_conversation_tracker.prepare_input(
+                 streamed_result.input, streamed_result.new_items
+             )
+         else:
+             input = ItemHelpers.input_to_new_input_list(streamed_result.input)
+             input.extend([item.to_input_item() for item in streamed_result.new_items])

          # THIS IS THE RESOLVED CONFLICT BLOCK
          filtered = await cls._maybe_filter_model_input(
@@ -1077,6 +1215,15 @@
              ),
          )

+         previous_response_id = (
+             server_conversation_tracker.previous_response_id
+             if server_conversation_tracker
+             else None
+         )
+         conversation_id = (
+             server_conversation_tracker.conversation_id if server_conversation_tracker else None
+         )
+
          # 1. Stream the output events
          async for event in model.stream_response(
              filtered.instructions,
@@ -1092,6 +1239,9 @@
              conversation_id=conversation_id,
              prompt=prompt_config,
          ):
+             # Emit the raw event ASAP
+             streamed_result._event_queue.put_nowait(RawResponsesStreamEvent(data=event))
+
              if isinstance(event, ResponseCompletedEvent):
                  usage = (
                      Usage(
@@ -1131,7 +1281,16 @@
                          RunItemStreamEvent(item=tool_item, name="tool_called")
                      )

-             streamed_result._event_queue.put_nowait(RawResponsesStreamEvent(data=event))
+                 elif isinstance(output_item, ResponseReasoningItem):
+                     reasoning_id: str | None = getattr(output_item, "id", None)
+
+                     if reasoning_id and reasoning_id not in emitted_reasoning_item_ids:
+                         emitted_reasoning_item_ids.add(reasoning_id)
+
+                         reasoning_item = ReasoningItem(raw_item=output_item, agent=agent)
+                         streamed_result._event_queue.put_nowait(
+                             RunItemStreamEvent(item=reasoning_item, name="reasoning_item_created")
+                         )

          # Call hook just after the model response is finalized.
          if final_response is not None:
@@ -1185,6 +1344,18 @@
              )
          ]

+         if emitted_reasoning_item_ids:
+             # Filter out reasoning items that were already emitted during streaming
+             items_to_filter = [
+                 item
+                 for item in items_to_filter
+                 if not (
+                     isinstance(item, ReasoningItem)
+                     and (reasoning_id := getattr(item.raw_item, "id", None))
+                     and reasoning_id in emitted_reasoning_item_ids
+                 )
+             ]
+
          # Filter out HandoffCallItem to avoid duplicates (already sent earlier)
          items_to_filter = [
              item for item in items_to_filter if not isinstance(item, HandoffCallItem)
@@ -1208,8 +1379,7 @@
          run_config: RunConfig,
          should_run_agent_start_hooks: bool,
          tool_use_tracker: AgentToolUseTracker,
-         previous_response_id: str | None,
-         conversation_id: str | None,
+         server_conversation_tracker: _ServerConversationTracker | None = None,
      ) -> SingleStepResult:
          # Ensure we run the hooks before anything else
          if should_run_agent_start_hooks:
@@ -1229,8 +1399,11 @@

          output_schema = cls._get_output_schema(agent)
          handoffs = await cls._get_handoffs(agent, context_wrapper)
-         input = ItemHelpers.input_to_new_input_list(original_input)
-         input.extend([generated_item.to_input_item() for generated_item in generated_items])
+         if server_conversation_tracker is not None:
+             input = server_conversation_tracker.prepare_input(original_input, generated_items)
+         else:
+             input = ItemHelpers.input_to_new_input_list(original_input)
+             input.extend([generated_item.to_input_item() for generated_item in generated_items])

          new_response = await cls._get_new_response(
              agent,
@@ -1243,8 +1416,7 @@
              context_wrapper,
              run_config,
              tool_use_tracker,
-             previous_response_id,
-             conversation_id,
+             server_conversation_tracker,
              prompt_config,
          )

@@ -1448,8 +1620,7 @@
          context_wrapper: RunContextWrapper[TContext],
          run_config: RunConfig,
          tool_use_tracker: AgentToolUseTracker,
-         previous_response_id: str | None,
-         conversation_id: str | None,
+         server_conversation_tracker: _ServerConversationTracker | None,
          prompt_config: ResponsePromptParam | None,
      ) -> ModelResponse:
          # Allow user to modify model input right before the call, if configured
@@ -1480,6 +1651,15 @@
              ),
          )

+         previous_response_id = (
+             server_conversation_tracker.previous_response_id
+             if server_conversation_tracker
+             else None
+         )
+         conversation_id = (
+             server_conversation_tracker.conversation_id if server_conversation_tracker else None
+         )
+
          new_response = await model.get_response(
              system_instructions=filtered.instructions,
              input=filtered.input,
@@ -1624,6 +1804,24 @@
          items_to_save = input_list + new_items_as_input
          await session.add_items(items_to_save)

+     @staticmethod
+     async def _input_guardrail_tripwire_triggered_for_stream(
+         streamed_result: RunResultStreaming,
+     ) -> bool:
+         """Return True if any input guardrail triggered during a streamed run."""
+
+         task = streamed_result._input_guardrails_task
+         if task is None:
+             return False
+
+         if not task.done():
+             await task
+
+         return any(
+             guardrail_result.output.tripwire_triggered
+             for guardrail_result in streamed_result.input_guardrail_results
+         )
+

  DEFAULT_AGENT_RUNNER = AgentRunner()
  _TOOL_CALL_TYPES: tuple[type, ...] = get_args(ToolCallItemTypes)
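
With the plumbing above in place, tool guardrail outcomes become visible on the result object. A small sketch of a caller inspecting them (the agent's tools and guardrails are elided; the two list attributes are the ones added in this release):

```python
import asyncio

from agents import Agent, Runner


async def main() -> None:
    agent = Agent(name="Assistant", instructions="Use tools carefully.")  # tools elided
    result = await Runner.run(agent, "Look up the account balance.")

    # New in 0.4.0: guardrail results from every tool executed during the run.
    print(len(result.tool_input_guardrail_results), "tool input guardrail results")
    print(len(result.tool_output_guardrail_results), "tool output guardrail results")


asyncio.run(main())
```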
agents/strict_schema.py CHANGED
@@ -87,6 +87,20 @@ def _ensure_strict_json_schema(
              for i, variant in enumerate(any_of)
          ]

+     # oneOf is not supported by OpenAI's structured outputs in nested contexts,
+     # so we convert it to anyOf which provides equivalent functionality for
+     # discriminated unions
+     one_of = json_schema.get("oneOf")
+     if is_list(one_of):
+         existing_any_of = json_schema.get("anyOf", [])
+         if not is_list(existing_any_of):
+             existing_any_of = []
+         json_schema["anyOf"] = existing_any_of + [
+             _ensure_strict_json_schema(variant, path=(*path, "oneOf", str(i)), root=root)
+             for i, variant in enumerate(one_of)
+         ]
+         json_schema.pop("oneOf")
+
      # intersections
      all_of = json_schema.get("allOf")
      if is_list(all_of):
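
The effect of this change is easiest to see on a concrete schema: a `oneOf` union is folded into `anyOf` before the schema reaches structured outputs. A simplified, self-contained sketch of the rewrite (the real `_ensure_strict_json_schema` also recurses into each variant and handles `allOf`, `$defs`, and more):

```python
def fold_one_of(schema: dict) -> dict:
    # Merge any oneOf variants into anyOf and drop oneOf, mirroring the
    # behavior added above (recursion into variants omitted here).
    one_of = schema.get("oneOf")
    if isinstance(one_of, list):
        schema["anyOf"] = list(schema.get("anyOf", [])) + one_of
        schema.pop("oneOf")
    return schema


schema = {
    "oneOf": [
        {"type": "object", "properties": {"kind": {"const": "cat"}}},
        {"type": "object", "properties": {"kind": {"const": "dog"}}},
    ]
}
print(fold_one_of(schema))
# {'anyOf': [{...'cat'...}, {...'dog'...}]} - oneOf is gone, both variants preserved
```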