openai-agents 0.3.0__py3-none-any.whl → 0.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of openai-agents might be problematic.

@@ -62,6 +62,9 @@ class StreamingState:
     # Fields for real-time function call streaming
     function_call_streaming: dict[int, bool] = field(default_factory=dict)
     function_call_output_idx: dict[int, int] = field(default_factory=dict)
+    # Store accumulated thinking text and signature for Anthropic compatibility
+    thinking_text: str = ""
+    thinking_signature: str | None = None
 
 
 class SequenceNumber:
@@ -101,6 +104,19 @@ class ChatCmplStreamHandler:
 
             delta = chunk.choices[0].delta
 
+            # Handle thinking blocks from Anthropic (for preserving signatures)
+            if hasattr(delta, "thinking_blocks") and delta.thinking_blocks:
+                for block in delta.thinking_blocks:
+                    if isinstance(block, dict):
+                        # Accumulate thinking text
+                        thinking_text = block.get("thinking", "")
+                        if thinking_text:
+                            state.thinking_text += thinking_text
+                        # Store signature if present
+                        signature = block.get("signature")
+                        if signature:
+                            state.thinking_signature = signature
+
             # Handle reasoning content for reasoning summaries
             if hasattr(delta, "reasoning_content"):
                 reasoning_content = delta.reasoning_content
@@ -527,7 +543,19 @@ class ChatCmplStreamHandler:
 
         # include Reasoning item if it exists
         if state.reasoning_content_index_and_output:
-            outputs.append(state.reasoning_content_index_and_output[1])
+            reasoning_item = state.reasoning_content_index_and_output[1]
+            # Store thinking text in content and signature in encrypted_content
+            if state.thinking_text:
+                # Add thinking text as a Content object
+                if not reasoning_item.content:
+                    reasoning_item.content = []
+                reasoning_item.content.append(
+                    Content(text=state.thinking_text, type="reasoning_text")
+                )
+            # Store signature in encrypted_content
+            if state.thinking_signature:
+                reasoning_item.encrypted_content = state.thinking_signature
+            outputs.append(reasoning_item)
 
         # include text or refusal content if they exist
         if state.text_content_index_and_output or state.refusal_content_index_and_output:
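Taken together, the two hunks above accumulate Anthropic "thinking" deltas into StreamingState while streaming and then attach the accumulated text (as a Content entry of type "reasoning_text") and the signature (as encrypted_content) to the final reasoning item. The following is a minimal standalone sketch of just the accumulation step; the delta payloads are hypothetical and use only the keys the handler actually reads ("thinking" and "signature"):

from __future__ import annotations

from dataclasses import dataclass


@dataclass
class _State:
    # Mirrors the two new StreamingState fields from the hunk above.
    thinking_text: str = ""
    thinking_signature: str | None = None


def accumulate(state: _State, thinking_blocks: list[dict]) -> None:
    # Same logic as the added handler code: concatenate text, keep the last signature.
    for block in thinking_blocks:
        if isinstance(block, dict):
            text = block.get("thinking", "")
            if text:
                state.thinking_text += text
            signature = block.get("signature")
            if signature:
                state.thinking_signature = signature


state = _State()
accumulate(state, [{"thinking": "Consider the user's question. "}])
accumulate(state, [{"thinking": "Draft an answer.", "signature": "sig_abc123"}])
assert state.thinking_text == "Consider the user's question. Draft an answer."
assert state.thinking_signature == "sig_abc123"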
@@ -25,7 +25,7 @@ from ..tracing.spans import Span
 from ..usage import Usage
 from ..util._json import _to_dump_compatible
 from .chatcmpl_converter import Converter
-from .chatcmpl_helpers import HEADERS, ChatCmplHelpers
+from .chatcmpl_helpers import HEADERS, HEADERS_OVERRIDE, ChatCmplHelpers
 from .chatcmpl_stream_handler import ChatCmplStreamHandler
 from .fake_id import FAKE_RESPONSES_ID
 from .interface import Model, ModelTracing
@@ -306,7 +306,7 @@ class OpenAIChatCompletionsModel(Model):
             reasoning_effort=self._non_null_or_not_given(reasoning_effort),
             verbosity=self._non_null_or_not_given(model_settings.verbosity),
             top_logprobs=self._non_null_or_not_given(model_settings.top_logprobs),
-            extra_headers={**HEADERS, **(model_settings.extra_headers or {})},
+            extra_headers=self._merge_headers(model_settings),
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,
             metadata=self._non_null_or_not_given(model_settings.metadata),
@@ -349,3 +349,10 @@ class OpenAIChatCompletionsModel(Model):
         if self._client is None:
             self._client = AsyncOpenAI()
         return self._client
+
+    def _merge_headers(self, model_settings: ModelSettings):
+        return {
+            **HEADERS,
+            **(model_settings.extra_headers or {}),
+            **(HEADERS_OVERRIDE.get() or {}),
+        }
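The new _merge_headers relies on later-wins dict merging: a context-local override (HEADERS_OVERRIDE, a ContextVar defined in chatcmpl_helpers; the Responses model defines its own _HEADERS_OVERRIDE in a later hunk) takes precedence over the per-call extra_headers from ModelSettings, which in turn take precedence over the SDK's default User-Agent headers. A minimal sketch of that precedence follows; the names and header values are illustrative only, not the library's public API:

from __future__ import annotations

from contextvars import ContextVar

HEADERS = {"User-Agent": "Agents/Python 0.3.2"}  # stand-in for the SDK defaults
HEADERS_OVERRIDE: ContextVar[dict[str, str] | None] = ContextVar(
    "headers_override", default=None
)


def merge_headers(extra_headers: dict[str, str] | None) -> dict[str, str]:
    # Later entries win: defaults < per-call extras < context-local override.
    return {
        **HEADERS,
        **(extra_headers or {}),
        **(HEADERS_OVERRIDE.get() or {}),
    }


token = HEADERS_OVERRIDE.set({"User-Agent": "my-proxy/1.0"})
try:
    print(merge_headers({"X-Request-Id": "abc"}))
    # {'User-Agent': 'my-proxy/1.0', 'X-Request-Id': 'abc'}
finally:
    HEADERS_OVERRIDE.reset(token)

Because the override lives in a ContextVar, it is scoped to the current task/context rather than mutating module-level state, which is why it can safely take the highest precedence.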
@@ -2,6 +2,7 @@ from __future__ import annotations
 
 import json
 from collections.abc import AsyncIterator
+from contextvars import ContextVar
 from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any, Literal, cast, overload
 
@@ -49,6 +50,11 @@ if TYPE_CHECKING:
 _USER_AGENT = f"Agents/Python {__version__}"
 _HEADERS = {"User-Agent": _USER_AGENT}
 
+# Override headers used by the Responses API.
+_HEADERS_OVERRIDE: ContextVar[dict[str, str] | None] = ContextVar(
+    "openai_responses_headers_override", default=None
+)
+
 
 class OpenAIResponsesModel(Model):
     """
@@ -312,7 +318,7 @@ class OpenAIResponsesModel(Model):
             tool_choice=tool_choice,
             parallel_tool_calls=parallel_tool_calls,
             stream=stream,
-            extra_headers={**_HEADERS, **(model_settings.extra_headers or {})},
+            extra_headers=self._merge_headers(model_settings),
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,
             text=response_format,
@@ -327,6 +333,13 @@ class OpenAIResponsesModel(Model):
             self._client = AsyncOpenAI()
         return self._client
 
+    def _merge_headers(self, model_settings: ModelSettings):
+        return {
+            **_HEADERS,
+            **(model_settings.extra_headers or {}),
+            **(_HEADERS_OVERRIDE.get() or {}),
+        }
+
 
 @dataclass
 class ConvertedTools:
@@ -3,6 +3,7 @@ from .config import (
     RealtimeAudioFormat,
     RealtimeClientMessage,
     RealtimeGuardrailsSettings,
+    RealtimeInputAudioNoiseReductionConfig,
     RealtimeInputAudioTranscriptionConfig,
     RealtimeModelName,
     RealtimeModelTracingConfig,
@@ -101,6 +102,7 @@ __all__ = [
     "RealtimeAudioFormat",
     "RealtimeClientMessage",
     "RealtimeGuardrailsSettings",
+    "RealtimeInputAudioNoiseReductionConfig",
     "RealtimeInputAudioTranscriptionConfig",
     "RealtimeModelName",
     "RealtimeModelTracingConfig",
agents/realtime/config.py CHANGED
@@ -61,6 +61,13 @@ class RealtimeInputAudioTranscriptionConfig(TypedDict):
     """An optional prompt to guide transcription."""
 
 
+class RealtimeInputAudioNoiseReductionConfig(TypedDict):
+    """Noise reduction configuration for input audio."""
+
+    type: NotRequired[Literal["near_field", "far_field"]]
+    """Noise reduction mode to apply to input audio."""
+
+
 class RealtimeTurnDetectionConfig(TypedDict):
     """Turn detection config. Allows extra vendor keys if needed."""
 
@@ -119,6 +126,9 @@ class RealtimeSessionModelSettings(TypedDict):
     input_audio_transcription: NotRequired[RealtimeInputAudioTranscriptionConfig]
     """Configuration for transcribing input audio."""
 
+    input_audio_noise_reduction: NotRequired[RealtimeInputAudioNoiseReductionConfig | None]
+    """Noise reduction configuration for input audio."""
+
     turn_detection: NotRequired[RealtimeTurnDetectionConfig]
     """Configuration for detecting conversation turns."""
 
@@ -84,6 +84,7 @@ class RealtimeModelInputAudioTranscriptionCompletedEvent:
 
     type: Literal["input_audio_transcription_completed"] = "input_audio_transcription_completed"
 
+
 @dataclass
 class RealtimeModelInputAudioTimeoutTriggeredEvent:
     """Input audio timeout triggered."""
@@ -94,6 +95,7 @@ class RealtimeModelInputAudioTimeoutTriggeredEvent:
 
     type: Literal["input_audio_timeout_triggered"] = "input_audio_timeout_triggered"
 
+
 @dataclass
 class RealtimeModelTranscriptDeltaEvent:
     """Partial transcript update."""
@@ -825,14 +825,24 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
             "output_audio_format",
             DEFAULT_MODEL_SETTINGS.get("output_audio_format"),
         )
+        input_audio_noise_reduction = model_settings.get(
+            "input_audio_noise_reduction",
+            DEFAULT_MODEL_SETTINGS.get("input_audio_noise_reduction"),
+        )
 
         input_audio_config = None
         if any(
             value is not None
-            for value in [input_audio_format, input_audio_transcription, turn_detection]
+            for value in [
+                input_audio_format,
+                input_audio_noise_reduction,
+                input_audio_transcription,
+                turn_detection,
+            ]
         ):
             input_audio_config = OpenAIRealtimeAudioInput(
                 format=to_realtime_audio_format(input_audio_format),
+                noise_reduction=cast(Any, input_audio_noise_reduction),
                 transcription=cast(Any, input_audio_transcription),
                 turn_detection=cast(Any, turn_detection),
             )
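With the new TypedDict defined in agents/realtime/config.py and consumed by the websocket model above, a realtime session can request input-audio noise reduction via its model settings. A hypothetical settings fragment is shown below; the noise-reduction shape comes from RealtimeInputAudioNoiseReductionConfig, the transcription prompt key is the one documented in the config hunk, and the rest of the session setup is omitted:

# Illustrative RealtimeSessionModelSettings fragment (plain dict, keys as in the
# TypedDicts above). The setting also accepts None, per its `... | None` annotation.
model_settings = {
    "input_audio_noise_reduction": {"type": "near_field"},  # or "far_field"
    "input_audio_transcription": {"prompt": "Transcribe product names verbatim."},
}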
@@ -408,6 +408,7 @@ class RealtimeSession(RealtimeModelListener):
                 usage=self._context_wrapper.usage,
                 tool_name=event.name,
                 tool_call_id=event.call_id,
+                tool_arguments=event.arguments,
             )
             result = await func_tool.on_invoke_tool(tool_context, event.arguments)
 
@@ -432,6 +433,7 @@ class RealtimeSession(RealtimeModelListener):
                 usage=self._context_wrapper.usage,
                 tool_name=event.name,
                 tool_call_id=event.call_id,
+                tool_arguments=event.arguments,
             )
 
             # Execute the handoff to get the new agent
agents/result.py CHANGED
@@ -185,31 +185,42 @@ class RunResultStreaming(RunResultBase):
         - A MaxTurnsExceeded exception if the agent exceeds the max_turns limit.
         - A GuardrailTripwireTriggered exception if a guardrail is tripped.
         """
-        while True:
-            self._check_errors()
-            if self._stored_exception:
-                logger.debug("Breaking due to stored exception")
-                self.is_complete = True
-                break
+        try:
+            while True:
+                self._check_errors()
+                if self._stored_exception:
+                    logger.debug("Breaking due to stored exception")
+                    self.is_complete = True
+                    break
 
-            if self.is_complete and self._event_queue.empty():
-                break
+                if self.is_complete and self._event_queue.empty():
+                    break
 
-            try:
-                item = await self._event_queue.get()
-            except asyncio.CancelledError:
-                break
+                try:
+                    item = await self._event_queue.get()
+                except asyncio.CancelledError:
+                    break
 
-            if isinstance(item, QueueCompleteSentinel):
-                self._event_queue.task_done()
-                # Check for errors, in case the queue was completed due to an exception
-                self._check_errors()
-                break
+                if isinstance(item, QueueCompleteSentinel):
+                    # Await input guardrails if they are still running, so late
+                    # exceptions are captured.
+                    await self._await_task_safely(self._input_guardrails_task)
+
+                    self._event_queue.task_done()
 
-            yield item
-            self._event_queue.task_done()
+                    # Check for errors, in case the queue was completed
+                    # due to an exception
+                    self._check_errors()
+                    break
 
-        self._cleanup_tasks()
+                yield item
+                self._event_queue.task_done()
+        finally:
+            # Ensure main execution completes before cleanup to avoid race conditions
+            # with session operations
+            await self._await_task_safely(self._run_impl_task)
+            # Safely terminate all background tasks after main execution has finished
+            self._cleanup_tasks()
 
         if self._stored_exception:
             raise self._stored_exception
@@ -274,3 +285,19 @@ class RunResultStreaming(RunResultBase):
 
     def __str__(self) -> str:
         return pretty_print_run_result_streaming(self)
+
+    async def _await_task_safely(self, task: asyncio.Task[Any] | None) -> None:
+        """Await a task if present, ignoring cancellation and storing exceptions elsewhere.
+
+        This ensures we do not lose late guardrail exceptions while not surfacing
+        CancelledError to callers of stream_events.
+        """
+        if task and not task.done():
+            try:
+                await task
+            except asyncio.CancelledError:
+                # Task was cancelled (e.g., due to result.cancel()). Nothing to do here.
+                pass
+            except Exception:
+                # The exception will be surfaced via _check_errors() if needed.
+                pass
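Together, the two agents/result.py hunks rework stream_events so that background tasks are awaited before cleanup and a guardrail failure that lands after the stream has started still reaches the caller as the stored exception. The following is a simplified, standalone sketch of that pattern (swallow the error while draining, re-raise it only after cleanup); unlike the real code it stores the exception directly instead of routing it through _check_errors(), and every name here is illustrative:

from __future__ import annotations

import asyncio


async def guardrail() -> None:
    # Stand-in for an input guardrail task that fails after streaming has started.
    await asyncio.sleep(0.01)
    raise RuntimeError("tripwire")


async def stream() -> None:
    stored: BaseException | None = None
    task = asyncio.create_task(guardrail())

    # ... streamed events would be yielded to the caller here ...

    try:
        await task  # wait for the late task instead of abandoning it
    except asyncio.CancelledError:
        pass  # cancellation is not an error for the caller
    except Exception as exc:
        stored = exc  # remember the failure instead of raising mid-stream

    # ... cleanup of background tasks happens here (the finally block above) ...

    if stored:
        raise stored  # surface the guardrail failure only after cleanup


try:
    asyncio.run(stream())
except RuntimeError as exc:
    print(f"surfaced after cleanup: {exc}")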