openai-agents 0.2.3__py3-none-any.whl → 0.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


agents/agent.py CHANGED
@@ -214,7 +214,7 @@ class Agent(AgentBase, Generic[TContext]):
       calls result in a final output.

     NOTE: This configuration is specific to FunctionTools. Hosted tools, such as file search,
-    web search, etc are always processed by the LLM.
+    web search, etc. are always processed by the LLM.
     """

     reset_tool_choice: bool = True
@@ -289,30 +289,3 @@ class Agent(AgentBase, Generic[TContext]):
     ) -> ResponsePromptParam | None:
         """Get the prompt for the agent."""
         return await PromptUtil.to_model_input(self.prompt, run_context, self)
-
-    async def get_mcp_tools(self, run_context: RunContextWrapper[TContext]) -> list[Tool]:
-        """Fetches the available tools from the MCP servers."""
-        convert_schemas_to_strict = self.mcp_config.get("convert_schemas_to_strict", False)
-        return await MCPUtil.get_all_function_tools(
-            self.mcp_servers, convert_schemas_to_strict, run_context, self
-        )
-
-    async def get_all_tools(self, run_context: RunContextWrapper[Any]) -> list[Tool]:
-        """All agent tools, including MCP tools and function tools."""
-        mcp_tools = await self.get_mcp_tools(run_context)
-
-        async def _check_tool_enabled(tool: Tool) -> bool:
-            if not isinstance(tool, FunctionTool):
-                return True
-
-            attr = tool.is_enabled
-            if isinstance(attr, bool):
-                return attr
-            res = attr(run_context, self)
-            if inspect.isawaitable(res):
-                return bool(await res)
-            return bool(res)
-
-        results = await asyncio.gather(*(_check_tool_enabled(t) for t in self.tools))
-        enabled: list[Tool] = [t for t, ok in zip(self.tools, results) if ok]
-        return [*mcp_tools, *enabled]
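For context, the removed `get_all_tools` resolved each tool's `is_enabled`, which may be a plain bool or a sync/async callable taking the run context and the agent. A minimal standalone sketch of that resolution pattern (the helper name and stand-in arguments are hypothetical, not SDK API):

    import asyncio
    import inspect

    async def resolve_is_enabled(attr, ctx, agent) -> bool:
        # Mirrors the removed _check_tool_enabled: `attr` is either a bool or a
        # callable (possibly async) of (run_context, agent).
        if isinstance(attr, bool):
            return attr
        res = attr(ctx, agent)
        if inspect.isawaitable(res):
            return bool(await res)
        return bool(res)

    async def main() -> None:
        assert await resolve_is_enabled(True, None, None)
        assert not await resolve_is_enabled(lambda ctx, agent: False, None, None)

    asyncio.run(main())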
agents/agent_output.py CHANGED
@@ -116,7 +116,7 @@ class AgentOutputSchema(AgentOutputSchemaBase):
             raise UserError(
                 "Strict JSON schema is enabled, but the output type is not valid. "
                 "Either make the output type strict, "
-                "or wrap your type with AgentOutputSchema(your_type, strict_json_schema=False)"
+                "or wrap your type with AgentOutputSchema(YourType, strict_json_schema=False)"
             ) from e

    def is_plain_text(self) -> bool:
agents/extensions/models/litellm_model.py CHANGED
@@ -45,6 +45,14 @@ from ...tracing.spans import Span
 from ...usage import Usage


+class InternalChatCompletionMessage(ChatCompletionMessage):
+    """
+    An internal subclass to carry reasoning_content without modifying the original model.
+    """
+
+    reasoning_content: str
+
+
 class LitellmModel(Model):
     """This class enables using any model via LiteLLM. LiteLLM allows you to acess OpenAPI,
     Anthropic, Gemini, Mistral, and many other models.
@@ -364,13 +372,18 @@ class LitellmConverter:
             provider_specific_fields.get("refusal", None) if provider_specific_fields else None
         )

-        return ChatCompletionMessage(
+        reasoning_content = ""
+        if hasattr(message, "reasoning_content") and message.reasoning_content:
+            reasoning_content = message.reasoning_content
+
+        return InternalChatCompletionMessage(
             content=message.content,
             refusal=refusal,
             role="assistant",
             annotations=cls.convert_annotations_to_openai(message),
             audio=message.get("audio", None),  # litellm deletes audio if not present
             tool_calls=tool_calls,
+            reasoning_content=reasoning_content,
         )

    @classmethod
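A hedged sketch of what the new subclass enables downstream: `reasoning_content` rides along on an otherwise standard `ChatCompletionMessage`, so callers can read it defensively (the message values below are illustrative):

    from openai.types.chat import ChatCompletionMessage

    class InternalChatCompletionMessage(ChatCompletionMessage):
        """Mirror of the subclass added above; carries reasoning_content."""
        reasoning_content: str

    msg = InternalChatCompletionMessage(
        role="assistant",
        content="The answer is 42.",
        reasoning_content="Recalled the constant from context.",  # illustrative
    )
    # Safe on plain ChatCompletionMessage instances too, where the field is absent:
    print(getattr(msg, "reasoning_content", ""))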
agents/function_schema.py CHANGED
@@ -76,7 +76,7 @@ class FuncSchema:

 @dataclass
 class FuncDocumentation:
-    """Contains metadata about a python function, extracted from its docstring."""
+    """Contains metadata about a Python function, extracted from its docstring."""

     name: str
     """The name of the function, via `__name__`."""
@@ -194,7 +194,7 @@ def function_schema(
     strict_json_schema: bool = True,
 ) -> FuncSchema:
     """
-    Given a python function, extracts a `FuncSchema` from it, capturing the name, description,
+    Given a Python function, extracts a `FuncSchema` from it, capturing the name, description,
     parameter descriptions, and other metadata.

     Args:
@@ -208,7 +208,7 @@ def function_schema(
             descriptions.
         strict_json_schema: Whether the JSON schema is in strict mode. If True, we'll ensure that
             the schema adheres to the "strict" standard the OpenAI API expects. We **strongly**
-            recommend setting this to True, as it increases the likelihood of the LLM providing
+            recommend setting this to True, as it increases the likelihood of the LLM producing
             correct JSON input.

     Returns:
agents/guardrail.py CHANGED
@@ -78,8 +78,9 @@ class InputGuardrail(Generic[TContext]):
     You can use the `@input_guardrail()` decorator to turn a function into an `InputGuardrail`, or
     create an `InputGuardrail` manually.

-    Guardrails return a `GuardrailResult`. If `result.tripwire_triggered` is `True`, the agent
-    execution will immediately stop and a `InputGuardrailTripwireTriggered` exception will be raised
+    Guardrails return a `GuardrailResult`. If `result.tripwire_triggered` is `True`,
+    the agent's execution will immediately stop, and
+    an `InputGuardrailTripwireTriggered` exception will be raised
     """

     guardrail_function: Callable[
@@ -132,7 +133,7 @@ class OutputGuardrail(Generic[TContext]):
     You can use the `@output_guardrail()` decorator to turn a function into an `OutputGuardrail`,
     or create an `OutputGuardrail` manually.

-    Guardrails return a `GuardrailResult`. If `result.tripwire_triggered` is `True`, a
+    Guardrails return a `GuardrailResult`. If `result.tripwire_triggered` is `True`, an
     `OutputGuardrailTripwireTriggered` exception will be raised.
     """

@@ -316,7 +317,7 @@ def output_guardrail(
    ) -> OutputGuardrail[TContext_co]:
        return OutputGuardrail(
            guardrail_function=f,
-            # Guardrail name defaults to function name when not specified (None).
+            # Guardrail name defaults to function's name when not specified (None).
            name=name if name else f.__name__,
        )

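To make the tripwire semantics concrete, a minimal hedged sketch of an input guardrail (the decorator, `GuardrailFunctionOutput`, and the function signature follow this SDK's public guardrail API; the banned-word check is illustrative):

    from agents import Agent, GuardrailFunctionOutput, RunContextWrapper, input_guardrail
    from agents.items import TResponseInputItem

    @input_guardrail  # name defaults to the function's name, per the comment above
    async def no_secrets(
        context: RunContextWrapper, agent: Agent, user_input: str | list[TResponseInputItem]
    ) -> GuardrailFunctionOutput:
        tripped = "password" in str(user_input).lower()  # illustrative check
        # tripwire_triggered=True halts the run with InputGuardrailTripwireTriggered.
        return GuardrailFunctionOutput(output_info=None, tripwire_triggered=tripped)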
agents/models/chatcmpl_converter.py CHANGED
@@ -36,6 +36,7 @@ from openai.types.responses import (
     ResponseOutputRefusal,
     ResponseOutputText,
     ResponseReasoningItem,
+    ResponseReasoningItemParam,
 )
 from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message
 from openai.types.responses.response_reasoning_item import Summary
@@ -210,6 +211,12 @@ class Converter:
             return cast(ResponseOutputMessageParam, item)
         return None

+    @classmethod
+    def maybe_reasoning_message(cls, item: Any) -> ResponseReasoningItemParam | None:
+        if isinstance(item, dict) and item.get("type") == "reasoning":
+            return cast(ResponseReasoningItemParam, item)
+        return None
+
     @classmethod
     def extract_text_content(
         cls, content: str | Iterable[ResponseInputContentParam]
@@ -459,7 +466,11 @@ class Converter:
                     f"Encountered an item_reference, which is not supported: {item_ref}"
                 )

-            # 7) If we haven't recognized it => fail or ignore
+            # 7) reasoning message => not handled
+            elif cls.maybe_reasoning_message(item):
+                pass
+
+            # 8) If we haven't recognized it => fail or ignore
             else:
                 raise UserError(f"Unhandled item type or structure: {item}")

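The net effect of the new branch, sketched standalone below: dict-shaped reasoning items are now recognized and deliberately skipped instead of falling through to `UserError` (the item payloads are illustrative):

    # Shape check mirroring Converter.maybe_reasoning_message from the diff above.
    def maybe_reasoning_message(item):
        if isinstance(item, dict) and item.get("type") == "reasoning":
            return item
        return None

    assert maybe_reasoning_message({"type": "reasoning", "summary": []}) is not None
    assert maybe_reasoning_message({"type": "message"}) is None  # still handled elsewhere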
agents/models/chatcmpl_stream_handler.py CHANGED
@@ -198,6 +198,7 @@ class ChatCmplStreamHandler:
                     is not None,  # fixed 0 -> 0 or 1
                     type="response.output_text.delta",
                     sequence_number=sequence_number.get_and_increment(),
+                    logprobs=[],
                 )
                 # Accumulate the text into the response part
                 state.text_content_index_and_output[1].text += delta.content
@@ -288,10 +289,11 @@ class ChatCmplStreamHandler:
                     function_call = state.function_calls[tc_delta.index]

                     # Start streaming as soon as we have function name and call_id
-                    if (not state.function_call_streaming[tc_delta.index] and
-                            function_call.name and
-                            function_call.call_id):
-
+                    if (
+                        not state.function_call_streaming[tc_delta.index]
+                        and function_call.name
+                        and function_call.call_id
+                    ):
                         # Calculate the output index for this function call
                         function_call_starting_index = 0
                         if state.reasoning_content_index_and_output:
@@ -308,9 +310,9 @@ class ChatCmplStreamHandler:

                         # Mark this function call as streaming and store its output index
                         state.function_call_streaming[tc_delta.index] = True
-                        state.function_call_output_idx[
-                            tc_delta.index
-                        ] = function_call_starting_index
+                        state.function_call_output_idx[tc_delta.index] = (
+                            function_call_starting_index
+                        )

                         # Send initial function call added event
                         yield ResponseOutputItemAddedEvent(
@@ -327,10 +329,11 @@ class ChatCmplStreamHandler:
                     )

                 # Stream arguments if we've started streaming this function call
-                if (state.function_call_streaming.get(tc_delta.index, False) and
-                        tc_function and
-                        tc_function.arguments):
-
+                if (
+                    state.function_call_streaming.get(tc_delta.index, False)
+                    and tc_function
+                    and tc_function.arguments
+                ):
                     output_index = state.function_call_output_idx[tc_delta.index]
                     yield ResponseFunctionCallArgumentsDeltaEvent(
                         delta=tc_function.arguments,
agents/realtime/__init__.py CHANGED
@@ -47,6 +47,8 @@ from .model import (
     RealtimeModel,
     RealtimeModelConfig,
     RealtimeModelListener,
+    RealtimePlaybackState,
+    RealtimePlaybackTracker,
 )
 from .model_events import (
     RealtimeConnectionStatus,
@@ -139,6 +141,8 @@ __all__ = [
     "RealtimeModel",
     "RealtimeModelConfig",
     "RealtimeModelListener",
+    "RealtimePlaybackTracker",
+    "RealtimePlaybackState",
     # Model Events
     "RealtimeConnectionStatus",
     "RealtimeModelAudioDoneEvent",
agents/realtime/_default_tracker.py ADDED
@@ -0,0 +1,47 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+from datetime import datetime
+
+from ._util import calculate_audio_length_ms
+from .config import RealtimeAudioFormat
+
+
+@dataclass
+class ModelAudioState:
+    initial_received_time: datetime
+    audio_length_ms: float
+
+
+class ModelAudioTracker:
+    def __init__(self) -> None:
+        # (item_id, item_content_index) -> ModelAudioState
+        self._states: dict[tuple[str, int], ModelAudioState] = {}
+        self._last_audio_item: tuple[str, int] | None = None
+
+    def set_audio_format(self, format: RealtimeAudioFormat) -> None:
+        """Called when the model wants to set the audio format."""
+        self._format = format
+
+    def on_audio_delta(self, item_id: str, item_content_index: int, audio_bytes: bytes) -> None:
+        """Called when an audio delta is received from the model."""
+        ms = calculate_audio_length_ms(self._format, audio_bytes)
+        new_key = (item_id, item_content_index)
+
+        self._last_audio_item = new_key
+        if new_key not in self._states:
+            self._states[new_key] = ModelAudioState(datetime.now(), ms)
+        else:
+            self._states[new_key].audio_length_ms += ms
+
+    def on_interrupted(self) -> None:
+        """Called when the audio playback has been interrupted."""
+        self._last_audio_item = None
+
+    def get_state(self, item_id: str, item_content_index: int) -> ModelAudioState | None:
+        """Called when the model wants to get the current playback state."""
+        return self._states.get((item_id, item_content_index))
+
+    def get_last_audio_item(self) -> tuple[str, int] | None:
+        """Called when the model wants to get the last audio item ID and content index."""
+        return self._last_audio_item
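A hedged usage sketch of the new default tracker ("pcm16" is assumed to be a valid `RealtimeAudioFormat` value; the IDs and byte counts are illustrative):

    from agents.realtime._default_tracker import ModelAudioTracker

    tracker = ModelAudioTracker()
    tracker.set_audio_format("pcm16")  # must be set before deltas arrive
    tracker.on_audio_delta("item_1", 0, b"\x00" * 9600)  # first delta creates state
    tracker.on_audio_delta("item_1", 0, b"\x00" * 9600)  # same key accumulates length
    assert tracker.get_last_audio_item() == ("item_1", 0)
    assert tracker.get_state("item_1", 0) is not None
    tracker.on_interrupted()  # forgets only the "last item" pointer
    assert tracker.get_last_audio_item() is None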
agents/realtime/_util.py ADDED
@@ -0,0 +1,9 @@
+from __future__ import annotations
+
+from .config import RealtimeAudioFormat
+
+
+def calculate_audio_length_ms(format: RealtimeAudioFormat | None, audio_bytes: bytes) -> float:
+    if format and format.startswith("g711"):
+        return (len(audio_bytes) / 8000) * 1000
+    return (len(audio_bytes) / 24 / 2) * 1000
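For reference, the byte-rate arithmetic behind the two branches (the format facts are standard, not taken from this diff): G.711 runs at 8000 one-byte samples per second, while 24 kHz PCM16 carries 24 samples per millisecond at 2 bytes per sample.

    # G.711: 8000 bytes is exactly one second of audio.
    assert (len(bytes(8000)) / 8000) * 1000 == 1000.0  # -> 1000 ms
    # 24 kHz PCM16: 24,000 samples/s x 2 bytes/sample = 48 bytes per millisecond.
    assert 24_000 * 2 / 1000 == 48.0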
agents/realtime/events.py CHANGED
@@ -115,6 +115,12 @@ class RealtimeAudioEnd:
     info: RealtimeEventInfo
     """Common info for all events, such as the context."""

+    item_id: str
+    """The ID of the item containing audio."""
+
+    content_index: int
+    """The index of the audio content in `item.content`"""
+
     type: Literal["audio_end"] = "audio_end"


@@ -125,6 +131,12 @@ class RealtimeAudio:
     audio: RealtimeModelAudioEvent
     """The audio event from the model layer."""

+    item_id: str
+    """The ID of the item containing audio."""
+
+    content_index: int
+    """The index of the audio content in `item.content`"""
+
     info: RealtimeEventInfo
     """Common info for all events, such as the context."""

@@ -140,6 +152,12 @@ class RealtimeAudioInterrupted:
     info: RealtimeEventInfo
     """Common info for all events, such as the context."""

+    item_id: str
+    """The ID of the item containing audio."""
+
+    content_index: int
+    """The index of the audio content in `item.content`"""
+
     type: Literal["audio_interrupted"] = "audio_interrupted"

agents/realtime/model.py CHANGED
@@ -6,13 +6,95 @@ from typing import Callable
 from typing_extensions import NotRequired, TypedDict

 from ..util._types import MaybeAwaitable
+from ._util import calculate_audio_length_ms
 from .config import (
+    RealtimeAudioFormat,
     RealtimeSessionModelSettings,
 )
 from .model_events import RealtimeModelEvent
 from .model_inputs import RealtimeModelSendEvent


+class RealtimePlaybackState(TypedDict):
+    current_item_id: str | None
+    """The item ID of the current item being played."""
+
+    current_item_content_index: int | None
+    """The index of the current item content being played."""
+
+    elapsed_ms: float | None
+    """The number of milliseconds of audio that have been played."""
+
+
+class RealtimePlaybackTracker:
+    """If you have custom playback logic or expect that audio is played with delays or at different
+    speeds, create an instance of RealtimePlaybackTracker and pass it to the session. You are
+    responsible for tracking the audio playback progress and calling `on_play_bytes` or
+    `on_play_ms` when the user has played some audio."""
+
+    def __init__(self) -> None:
+        self._format: RealtimeAudioFormat | None = None
+        # (item_id, item_content_index)
+        self._current_item: tuple[str, int] | None = None
+        self._elapsed_ms: float | None = None
+
+    def on_play_bytes(self, item_id: str, item_content_index: int, bytes: bytes) -> None:
+        """Called by you when you have played some audio.
+
+        Args:
+            item_id: The item ID of the audio being played.
+            item_content_index: The index of the audio content in `item.content`
+            bytes: The audio bytes that have been fully played.
+        """
+        ms = calculate_audio_length_ms(self._format, bytes)
+        self.on_play_ms(item_id, item_content_index, ms)
+
+    def on_play_ms(self, item_id: str, item_content_index: int, ms: float) -> None:
+        """Called by you when you have played some audio.
+
+        Args:
+            item_id: The item ID of the audio being played.
+            item_content_index: The index of the audio content in `item.content`
+            ms: The number of milliseconds of audio that have been played.
+        """
+        if self._current_item != (item_id, item_content_index):
+            self._current_item = (item_id, item_content_index)
+            self._elapsed_ms = ms
+        else:
+            assert self._elapsed_ms is not None
+            self._elapsed_ms += ms
+
+    def on_interrupted(self) -> None:
+        """Called by the model when the audio playback has been interrupted."""
+        self._current_item = None
+        self._elapsed_ms = None
+
+    def set_audio_format(self, format: RealtimeAudioFormat) -> None:
+        """Will be called by the model to set the audio format.
+
+        Args:
+            format: The audio format to use.
+        """
+        self._format = format
+
+    def get_state(self) -> RealtimePlaybackState:
+        """Will be called by the model to get the current playback state."""
+        if self._current_item is None:
+            return {
+                "current_item_id": None,
+                "current_item_content_index": None,
+                "elapsed_ms": None,
+            }
+        assert self._elapsed_ms is not None
+
+        item_id, item_content_index = self._current_item
+        return {
+            "current_item_id": item_id,
+            "current_item_content_index": item_content_index,
+            "elapsed_ms": self._elapsed_ms,
+        }
+
+
 class RealtimeModelListener(abc.ABC):
     """A listener for realtime transport events."""

@@ -39,6 +121,18 @@ class RealtimeModelConfig(TypedDict):
     initial_model_settings: NotRequired[RealtimeSessionModelSettings]
     """The initial model settings to use when connecting."""

+    playback_tracker: NotRequired[RealtimePlaybackTracker]
+    """The playback tracker to use when tracking audio playback progress. If not set, the model will
+    use a default implementation that assumes audio is played immediately, at realtime speed.
+
+    A playback tracker is useful for interruptions. The model generates audio much faster than
+    realtime playback speed. So if there's an interruption, its useful for the model to know how
+    much of the audio has been played by the user. In low-latency scenarios, it's fine to assume
+    that audio is played back immediately at realtime speed. But in scenarios like phone calls or
+    other remote interactions, you can set a playback tracker that lets the model know when audio
+    is played to the user.
+    """
+

 class RealtimeModel(abc.ABC):
     """Interface for connecting to a realtime model and sending/receiving events."""
agents/realtime/model_events.py CHANGED
@@ -40,6 +40,12 @@ class RealtimeModelAudioEvent:
     data: bytes
     response_id: str

+    item_id: str
+    """The ID of the item containing audio."""
+
+    content_index: int
+    """The index of the audio content in `item.content`"""
+
     type: Literal["audio"] = "audio"


@@ -47,6 +53,12 @@ class RealtimeModelAudioEvent:
 class RealtimeModelAudioInterruptedEvent:
     """Audio interrupted."""

+    item_id: str
+    """The ID of the item containing audio."""
+
+    content_index: int
+    """The index of the audio content in `item.content`"""
+
     type: Literal["audio_interrupted"] = "audio_interrupted"


@@ -54,6 +66,12 @@ class RealtimeModelAudioInterruptedEvent:
 class RealtimeModelAudioDoneEvent:
     """Audio done."""

+    item_id: str
+    """The ID of the item containing audio."""
+
+    content_index: int
+    """The index of the audio content in `item.content`"""
+
     type: Literal["audio_done"] = "audio_done"


@@ -138,6 +156,15 @@ class RealtimeModelExceptionEvent:
     type: Literal["exception"] = "exception"


+@dataclass
+class RealtimeModelRawServerEvent:
+    """Raw events forwarded from the server."""
+
+    data: Any
+
+    type: Literal["raw_server_event"] = "raw_server_event"
+
+
 # TODO (rm) Add usage events


@@ -156,4 +183,5 @@ RealtimeModelEvent: TypeAlias = Union[
     RealtimeModelTurnEndedEvent,
     RealtimeModelOtherEvent,
     RealtimeModelExceptionEvent,
+    RealtimeModelRawServerEvent,
 ]
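A hedged sketch of a listener consuming the enriched event stream (field names are from this diff; the playback callbacks are hypothetical stubs):

    from agents.realtime.model_events import RealtimeModelEvent

    def play_chunk(data: bytes, key: tuple[str, int]) -> None: ...  # hypothetical player
    def stop_playback(item_id: str, content_index: int) -> None: ...  # hypothetical player

    async def on_event(event: RealtimeModelEvent) -> None:
        if event.type == "audio":
            # New in 0.2.4: audio can be routed per (item_id, content_index).
            play_chunk(event.data, key=(event.item_id, event.content_index))
        elif event.type == "audio_interrupted":
            stop_playback(event.item_id, event.content_index)
        elif event.type == "raw_server_event":
            pass  # raw server payload is available as event.data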
agents/realtime/openai_realtime.py CHANGED
@@ -57,6 +57,7 @@ from typing_extensions import assert_never
 from websockets.asyncio.client import ClientConnection

 from agents.handoffs import Handoff
+from agents.realtime._default_tracker import ModelAudioTracker
 from agents.tool import FunctionTool, Tool
 from agents.util._types import MaybeAwaitable

@@ -72,6 +73,8 @@ from .model import (
     RealtimeModel,
     RealtimeModelConfig,
     RealtimeModelListener,
+    RealtimePlaybackState,
+    RealtimePlaybackTracker,
 )
 from .model_events import (
     RealtimeModelAudioDoneEvent,
@@ -83,6 +86,7 @@ from .model_events import (
     RealtimeModelInputAudioTranscriptionCompletedEvent,
     RealtimeModelItemDeletedEvent,
     RealtimeModelItemUpdatedEvent,
+    RealtimeModelRawServerEvent,
     RealtimeModelToolCallEvent,
     RealtimeModelTranscriptDeltaEvent,
     RealtimeModelTurnEndedEvent,
@@ -133,11 +137,11 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
         self._websocket_task: asyncio.Task[None] | None = None
         self._listeners: list[RealtimeModelListener] = []
         self._current_item_id: str | None = None
-        self._audio_start_time: datetime | None = None
-        self._audio_length_ms: float = 0.0
+        self._audio_state_tracker: ModelAudioTracker = ModelAudioTracker()
         self._ongoing_response: bool = False
-        self._current_audio_content_index: int | None = None
         self._tracing_config: RealtimeModelTracingConfig | Literal["auto"] | None = None
+        self._playback_tracker: RealtimePlaybackTracker | None = None
+        self._created_session: OpenAISessionObject | None = None

     async def connect(self, options: RealtimeModelConfig) -> None:
         """Establish a connection to the model and keep it alive."""
@@ -146,6 +150,8 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):

         model_settings: RealtimeSessionModelSettings = options.get("initial_model_settings", {})

+        self._playback_tracker = options.get("playback_tracker", RealtimePlaybackTracker())
+
         self.model = model_settings.get("model_name", self.model)
         api_key = await get_api_key(options.get("api_key"))

@@ -294,26 +300,69 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
         if event.start_response:
             await self._send_raw_message(OpenAIResponseCreateEvent(type="response.create"))

+    def _get_playback_state(self) -> RealtimePlaybackState:
+        if self._playback_tracker:
+            return self._playback_tracker.get_state()
+
+        if last_audio_item_id := self._audio_state_tracker.get_last_audio_item():
+            item_id, item_content_index = last_audio_item_id
+            audio_state = self._audio_state_tracker.get_state(item_id, item_content_index)
+            if audio_state:
+                elapsed_ms = (
+                    datetime.now() - audio_state.initial_received_time
+                ).total_seconds() * 1000
+                return {
+                    "current_item_id": item_id,
+                    "current_item_content_index": item_content_index,
+                    "elapsed_ms": elapsed_ms,
+                }
+
+        return {
+            "current_item_id": None,
+            "current_item_content_index": None,
+            "elapsed_ms": None,
+        }
+
     async def _send_interrupt(self, event: RealtimeModelSendInterrupt) -> None:
-        if not self._current_item_id or not self._audio_start_time:
+        playback_state = self._get_playback_state()
+        current_item_id = playback_state.get("current_item_id")
+        current_item_content_index = playback_state.get("current_item_content_index")
+        elapsed_ms = playback_state.get("elapsed_ms")
+        if current_item_id is None or elapsed_ms is None:
+            logger.info(
+                "Skipping interrupt. "
+                f"Item id: {current_item_id}, "
+                f"elapsed ms: {elapsed_ms}, "
+                f"content index: {current_item_content_index}"
+            )
             return

-        await self._cancel_response()
-
-        elapsed_time_ms = (datetime.now() - self._audio_start_time).total_seconds() * 1000
-        if elapsed_time_ms > 0 and elapsed_time_ms < self._audio_length_ms:
-            await self._emit_event(RealtimeModelAudioInterruptedEvent())
+        current_item_content_index = current_item_content_index or 0
+        if elapsed_ms > 0:
+            await self._emit_event(
+                RealtimeModelAudioInterruptedEvent(
+                    item_id=current_item_id,
+                    content_index=current_item_content_index,
+                )
+            )
         converted = _ConversionHelper.convert_interrupt(
-            self._current_item_id,
-            self._current_audio_content_index or 0,
-            int(elapsed_time_ms),
+            current_item_id,
+            current_item_content_index,
+            int(elapsed_ms),
         )
         await self._send_raw_message(converted)

-        self._current_item_id = None
-        self._audio_start_time = None
-        self._audio_length_ms = 0.0
-        self._current_audio_content_index = None
+        automatic_response_cancellation_enabled = (
+            self._created_session
+            and self._created_session.turn_detection
+            and self._created_session.turn_detection.interrupt_response
+        )
+        if not automatic_response_cancellation_enabled:
+            await self._cancel_response()
+
+        self._audio_state_tracker.on_interrupted()
+        if self._playback_tracker:
+            self._playback_tracker.on_interrupted()

     async def _send_session_update(self, event: RealtimeModelSendSessionUpdate) -> None:
         """Send a session update to the model."""
@@ -321,23 +370,21 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):

     async def _handle_audio_delta(self, parsed: ResponseAudioDeltaEvent) -> None:
         """Handle audio delta events and update audio tracking state."""
-        self._current_audio_content_index = parsed.content_index
         self._current_item_id = parsed.item_id
-        if self._audio_start_time is None:
-            self._audio_start_time = datetime.now()
-            self._audio_length_ms = 0.0

         audio_bytes = base64.b64decode(parsed.delta)
-        # Calculate audio length in ms using 24KHz pcm16le
-        self._audio_length_ms += self._calculate_audio_length_ms(audio_bytes)
+
+        self._audio_state_tracker.on_audio_delta(parsed.item_id, parsed.content_index, audio_bytes)
+
         await self._emit_event(
-            RealtimeModelAudioEvent(data=audio_bytes, response_id=parsed.response_id)
+            RealtimeModelAudioEvent(
+                data=audio_bytes,
+                response_id=parsed.response_id,
+                item_id=parsed.item_id,
+                content_index=parsed.content_index,
+            )
         )

-    def _calculate_audio_length_ms(self, audio_bytes: bytes) -> float:
-        """Calculate audio length in milliseconds for 24KHz PCM16LE format."""
-        return len(audio_bytes) / 24 / 2
-
     async def _handle_output_item(self, item: ConversationItem) -> None:
         """Handle response output item events (function calls and messages)."""
         if item.type == "function_call" and item.status == "completed":
@@ -401,6 +448,7 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
         self._ongoing_response = False

     async def _handle_ws_event(self, event: dict[str, Any]):
+        await self._emit_event(RealtimeModelRawServerEvent(data=event))
         try:
             if "previous_item_id" in event and event["previous_item_id"] is None:
                 event["previous_item_id"] = ""  # TODO (rm) remove
@@ -429,7 +477,12 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
         if parsed.type == "response.audio.delta":
             await self._handle_audio_delta(parsed)
         elif parsed.type == "response.audio.done":
-            await self._emit_event(RealtimeModelAudioDoneEvent())
+            await self._emit_event(
+                RealtimeModelAudioDoneEvent(
+                    item_id=parsed.item_id,
+                    content_index=parsed.content_index,
+                )
+            )
         elif parsed.type == "input_audio_buffer.speech_started":
             await self._send_interrupt(RealtimeModelSendInterrupt())
         elif parsed.type == "response.created":
@@ -440,6 +493,9 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
             await self._emit_event(RealtimeModelTurnEndedEvent())
         elif parsed.type == "session.created":
             await self._send_tracing_config(self._tracing_config)
+            self._update_created_session(parsed.session)  # type: ignore
+        elif parsed.type == "session.updated":
+            self._update_created_session(parsed.session)  # type: ignore
         elif parsed.type == "error":
             await self._emit_event(RealtimeModelErrorEvent(error=parsed.error))
         elif parsed.type == "conversation.item.deleted":
@@ -489,6 +545,13 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
         ):
             await self._handle_output_item(parsed.item)

+    def _update_created_session(self, session: OpenAISessionObject) -> None:
+        self._created_session = session
+        if session.output_audio_format:
+            self._audio_state_tracker.set_audio_format(session.output_audio_format)
+        if self._playback_tracker:
+            self._playback_tracker.set_audio_format(session.output_audio_format)
+
     async def _update_session_config(self, model_settings: RealtimeSessionModelSettings) -> None:
         session_config = self._get_session_config(model_settings)
         await self._send_raw_message(
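When no custom playback tracker is supplied, `_get_playback_state` falls back to wall-clock time since the first audio delta for the item, as sketched here (timestamps illustrative):

    from datetime import datetime

    initial_received_time = datetime(2025, 1, 1, 12, 0, 0)  # first delta received
    now = datetime(2025, 1, 1, 12, 0, 3)                    # interrupt arrives 3s later
    elapsed_ms = (now - initial_received_time).total_seconds() * 1000
    assert elapsed_ms == 3000.0  # assumed played in full, at realtime speed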
agents/realtime/session.py CHANGED
@@ -188,11 +188,26 @@ class RealtimeSession(RealtimeModelListener):
         elif event.type == "function_call":
             await self._handle_tool_call(event)
         elif event.type == "audio":
-            await self._put_event(RealtimeAudio(info=self._event_info, audio=event))
+            await self._put_event(
+                RealtimeAudio(
+                    info=self._event_info,
+                    audio=event,
+                    item_id=event.item_id,
+                    content_index=event.content_index,
+                )
+            )
         elif event.type == "audio_interrupted":
-            await self._put_event(RealtimeAudioInterrupted(info=self._event_info))
+            await self._put_event(
+                RealtimeAudioInterrupted(
+                    info=self._event_info, item_id=event.item_id, content_index=event.content_index
+                )
+            )
         elif event.type == "audio_done":
-            await self._put_event(RealtimeAudioEnd(info=self._event_info))
+            await self._put_event(
+                RealtimeAudioEnd(
+                    info=self._event_info, item_id=event.item_id, content_index=event.content_index
+                )
+            )
         elif event.type == "input_audio_transcription_completed":
             self._history = RealtimeSession._get_new_history(self._history, event)
             await self._put_event(
@@ -259,6 +274,8 @@ class RealtimeSession(RealtimeModelListener):
             self._stored_exception = event.exception
         elif event.type == "other":
             pass
+        elif event.type == "raw_server_event":
+            pass
         else:
             assert_never(event)

agents/tracing/create.py CHANGED
@@ -50,8 +50,7 @@ def trace(
         group_id: Optional grouping identifier to link multiple traces from the same conversation
             or process. For instance, you might use a chat thread ID.
         metadata: Optional dictionary of additional metadata to attach to the trace.
-        disabled: If True, we will return a Trace but the Trace will not be recorded. This will
-            not be checked if there's an existing trace and `even_if_trace_running` is True.
+        disabled: If True, we will return a Trace but the Trace will not be recorded.

     Returns:
         The newly created trace object.
agents/tracing/processors.py CHANGED
@@ -22,7 +22,7 @@ class ConsoleSpanExporter(TracingExporter):
     def export(self, items: list[Trace | Span[Any]]) -> None:
         for item in items:
             if isinstance(item, Trace):
-                print(f"[Exporter] Export trace_id={item.trace_id}, name={item.name}, ")
+                print(f"[Exporter] Export trace_id={item.trace_id}, name={item.name}")
             else:
                 print(f"[Exporter] Export span: {item.export()}")

@@ -121,7 +121,7 @@ class BackendSpanExporter(TracingExporter):
             logger.debug(f"Exported {len(items)} items")
             return

-        # If the response is a client error (4xx), we wont retry
+        # If the response is a client error (4xx), we won't retry
        if 400 <= response.status_code < 500:
            logger.error(
                f"[non-fatal] Tracing client error {response.status_code}: {response.text}"
@@ -183,7 +183,7 @@ class BatchTraceProcessor(TracingProcessor):
         self._shutdown_event = threading.Event()

         # The queue size threshold at which we export immediately.
-        self._export_trigger_size = int(max_queue_size * export_trigger_ratio)
+        self._export_trigger_size = max(1, int(max_queue_size * export_trigger_ratio))

         # Track when we next *must* perform a scheduled export
         self._next_export_time = time.time() + self._schedule_delay
@@ -269,8 +269,7 @@ class BatchTraceProcessor(TracingProcessor):

     def _export_batches(self, force: bool = False):
         """Drains the queue and exports in batches. If force=True, export everything.
-        Otherwise, export up to `max_batch_size` repeatedly until the queue is empty or below a
-        certain threshold.
+        Otherwise, export up to `max_batch_size` repeatedly until the queue is completely empty.
         """
         while True:
             items_to_export: list[Span[Any] | Trace] = []
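The `max(1, ...)` guard matters for small queues, where flooring could otherwise yield a trigger size of zero; a quick check (values illustrative):

    max_queue_size, export_trigger_ratio = 10, 0.05
    assert int(max_queue_size * export_trigger_ratio) == 0          # old behavior: floors to 0
    assert max(1, int(max_queue_size * export_trigger_ratio)) == 1  # new: at least one item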
agents/tracing/traces.py CHANGED
@@ -10,7 +10,7 @@ from .processor_interface import TracingProcessor
 from .scope import Scope


-class Trace:
+class Trace(abc.ABC):
    """
    A trace is the root level object that tracing creates. It represents a logical "workflow".
    """
openai_agents-0.2.3.dist-info/METADATA → openai_agents-0.2.4.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.2.3
+Version: 0.2.4
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://openai.github.io/openai-agents-python/
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -21,7 +21,7 @@ Classifier: Typing :: Typed
 Requires-Python: >=3.9
 Requires-Dist: griffe<2,>=1.5.6
 Requires-Dist: mcp<2,>=1.11.0; python_version >= '3.10'
-Requires-Dist: openai<2,>=1.96.1
+Requires-Dist: openai<2,>=1.97.1
 Requires-Dist: pydantic<3,>=2.10
 Requires-Dist: requests<3,>=2.0
 Requires-Dist: types-requests<3,>=2.0
@@ -338,6 +338,7 @@ make format-check # run style checker
 We'd like to acknowledge the excellent work of the open-source community, especially:

 - [Pydantic](https://docs.pydantic.dev/latest/) (data validation) and [PydanticAI](https://ai.pydantic.dev/) (advanced agent framework)
+- [LiteLLM](https://github.com/BerriAI/litellm) (unified interface for 100+ LLMs)
 - [MkDocs](https://github.com/squidfunk/mkdocs-material)
 - [Griffe](https://github.com/mkdocstrings/griffe)
 - [uv](https://github.com/astral-sh/uv) and [ruff](https://github.com/astral-sh/ruff)
openai_agents-0.2.3.dist-info/RECORD → openai_agents-0.2.4.dist-info/RECORD RENAMED
@@ -2,12 +2,12 @@ agents/__init__.py,sha256=KO_SBzwwg7cXPvMNDD1_lRhFIVR6E2RmyU624sAEEVo,7781
 agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
 agents/_debug.py,sha256=7OKys2lDjeCtGggTkM53m_8vw0WIr3yt-_JPBDAnsw0,608
 agents/_run_impl.py,sha256=LlUM0YqZWmqz4WoWu0YK1Du6k09TX-ot94sikM16Y4U,44507
-agents/agent.py,sha256=eWtYqVJHz3ol3SoLZm132_sJ46dF5DEKQ8aV8KgDv2E,13381
-agents/agent_output.py,sha256=bHItis02dw-issbxjB4VnjUFdSByM9OR26rzxsFOSnQ,7154
+agents/agent.py,sha256=Hn6O16BQ4jWG_qBx2PiIBvBr0BlwEf4AivK76fe61Gw,12184
+agents/agent_output.py,sha256=teTFK8unUN3esXhmEBO0bQGYQm1Axd5rYleDt9TFDgw,7153
 agents/computer.py,sha256=XD44UgiUWSfniv-xKwwDP6wFKVwBiZkpaL1hO-0-7ZA,2516
 agents/exceptions.py,sha256=NHMdHE0cZ6AdA6UgUylTzVHAX05Ol1CkO814a0FdZcs,2862
-agents/function_schema.py,sha256=JvMh356N60_c3hj7BXySuM7eqVwP00jealR7rdPnl60,13590
-agents/guardrail.py,sha256=kanNTh1OqSpzFH6QyNfucLDYHbBnvq3u-kWnFJw4lD8,9571
+agents/function_schema.py,sha256=yZ3PEOmfy836Me_W4QlItMeFq2j4BtpuI2FmQswbIcQ,13590
+agents/guardrail.py,sha256=7P-kd9rKPhgB8rtI31MCV5ho4ZrEaNCQxHvE8IK3EOk,9582
 agents/handoffs.py,sha256=L-b2eMNKyi-uF5Isz7UfpKc2Amvqies3i5tVjDnM3M4,10793
 agents/items.py,sha256=ZKc4aOBearYF4ItT9qtmehUUt9aS-3D0kVA3reoV1mU,9732
 agents/lifecycle.py,sha256=C1LSoCa_0zf0nt7yI3SKL5bAAG4Cso6--Gmk8S8zpJg,3111
@@ -30,7 +30,7 @@ agents/extensions/handoff_filters.py,sha256=2cXxu1JROez96CpTiGuT9PIuaIrIE8ksP01f
 agents/extensions/handoff_prompt.py,sha256=oGWN0uNh3Z1L7E-Ev2up8W084fFrDNOsLDy7P6bcmic,1006
 agents/extensions/visualization.py,sha256=g2eEwW22qe3A4WtH37LwaHhK3QZE9FYHVw9IcOVpwbk,4699
 agents/extensions/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-agents/extensions/models/litellm_model.py,sha256=Gmk7M4KGZ-Mfk2LUCzHL-FMm5C6_n41QzwSMVxYcfE8,15014
+agents/extensions/models/litellm_model.py,sha256=TWd57pzGJGpyvrBstqiFsPHlUFnExw1muchGGBA2jJc,15437
 agents/extensions/models/litellm_provider.py,sha256=wTm00Anq8YoNb9AnyT0JOunDG-HCDm_98ORNy7aNJdw,928
 agents/mcp/__init__.py,sha256=yHmmYlrmEHzUas1inRLKL2iPqbb_-107G3gKe_tyg4I,750
 agents/mcp/server.py,sha256=mTXQL4om5oA2fYevk63SUlwDri-RcUleUH_4hFrA0QM,24266
@@ -39,9 +39,9 @@ agents/memory/__init__.py,sha256=bo2Rb3PqwSCo9PhBVVJOjvjMM1TfytuDPAFEDADYwwA,84
 agents/memory/session.py,sha256=9RQ1I7qGh_9DzsyUd9srSPrxRBlw7jks-67NxYqKvvs,13060
 agents/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agents/models/_openai_shared.py,sha256=4Ngwo2Fv2RXY61Pqck1cYPkSln2tDnb8Ai-ao4QG-iE,836
-agents/models/chatcmpl_converter.py,sha256=lHVmWOxULJd_Q9WnWdh_ZYYRq07-4UNfpl7KDZEGZdg,19420
+agents/models/chatcmpl_converter.py,sha256=m05aOXzO9y23qO3u2-7pHWZ7rdIWQZfckI2KACdIOUY,19829
 agents/models/chatcmpl_helpers.py,sha256=eIWySobaH7I0AQijAz5i-_rtsXrSvmEHD567s_8Zw1o,1318
-agents/models/chatcmpl_stream_handler.py,sha256=3tbGS-mCOVUz9lLbm35rFxC8piYDsWqIJ8DCPE1DjuQ,23999
+agents/models/chatcmpl_stream_handler.py,sha256=XUoMnNEcSqK6IRMI6GPH8CwMCXi6NhbfHfpCY3SXJOM,24124
 agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
 agents/models/interface.py,sha256=TpY_GEk3LLMozCcYAEcC-Y_VRpI3pwE7A7ZM317mk7M,3839
 agents/models/multi_provider.py,sha256=aiDbls5G4YomPfN6qH1pGlj41WS5jlDp2T82zm6qcnM,5578
@@ -49,29 +49,31 @@ agents/models/openai_chatcompletions.py,sha256=Br7nWsibVvMr0jff6H6adpe_AjYTgLgoA
 agents/models/openai_provider.py,sha256=NMxTNaoTa329GrA7jj51LC02pb_e2eFh-PCvWADJrkY,3478
 agents/models/openai_responses.py,sha256=IaZ419gGkx8cWDZxi_2djvAor3RoUUiAdid782WOyv0,16720
 agents/realtime/README.md,sha256=5YCYXH5ULmlWoWo1PE9TlbHjeYgjnp-xY8ZssSFY2Vk,126
-agents/realtime/__init__.py,sha256=MPdn2EXsjP1WX-iGaQm94Yw_j8xNm-KcO-vdHhm0sCw,4807
+agents/realtime/__init__.py,sha256=7qvzK8QJuHRnPHxDgDj21v8-lnSN4Uurg9znwJv_Tqg,4923
+agents/realtime/_default_tracker.py,sha256=4OMxBvD1MnZmMn6JZYKL42uWhVzvK6NdDLDfPP54d78,1765
+agents/realtime/_util.py,sha256=uawurhWKi3_twNFcZ5Yn1mVvv0RKl4IoyCSag8hGxrE,313
 agents/realtime/agent.py,sha256=xVQYVJjsbi4FpJZ8jwogfKUsguOzpWXWih6rqLZ8AgE,3745
 agents/realtime/config.py,sha256=O7EGQgHrv2p0gtvZfODwSb4g1RJXkJ2ySH1YdNLt_K8,5751
-agents/realtime/events.py,sha256=bOyO7Yv0g_6StXKqAzapNTOq8GdaOuQqj3BbtXNfHU4,5090
+agents/realtime/events.py,sha256=YnyXmkc2rkIAcCDoW5yxylMYeXeaq_QTlyRR5u5VsaM,5534
 agents/realtime/handoffs.py,sha256=avLFix5kEutel57IRcddssGiVHzGptOzWL9OqPaLVh8,6702
 agents/realtime/items.py,sha256=psT6AH65qmngmPsgwk6CXacVo5tEDYq0Za3EitHFpTA,5052
-agents/realtime/model.py,sha256=YwMBwtj33Z6uADnz1AoYg4wSfmpfYdZNq7ZaK8hlekw,2188
-agents/realtime/model_events.py,sha256=JDh70uDctVuwex5EiYUdWhqQvBarN3ge7eREd1aUznU,3386
+agents/realtime/model.py,sha256=RJBA8-Dkd2JTqGzbKacoX4dN_qTWn_p7npL73To3ymw,6143
+agents/realtime/model_events.py,sha256=X7UrUU_g4u5gWaf2mUesJJ-Ik1Z1QE0Z-ZP7kDmX1t0,4034
 agents/realtime/model_inputs.py,sha256=OW2bn3wD5_pXLunDUf35jhG2q_bTKbC_D7Qu-83aOEA,2243
-agents/realtime/openai_realtime.py,sha256=dvy07idciGl8E0swuvmgtHYf7DUSitHrwImAiG_VFq0,27323
+agents/realtime/openai_realtime.py,sha256=wuZ4AFWpgpX76pCgxmt87-Oz738IgxbdF3wqMODaZUI,29817
 agents/realtime/runner.py,sha256=KfU7utmc9QFH2htIKN2IN9H-5EnB0qN9ezmvlRTnOm4,2511
-agents/realtime/session.py,sha256=jMOYmv3KcszBLLPcYkW_ChNiOblyoiGiYZ9jE6WUNEU,21908
+agents/realtime/session.py,sha256=WokpD9EfYacdVpiufWGdtNTDxPUZJxtPjbDfmWlJ40M,22411
 agents/tracing/__init__.py,sha256=5HO_6na5S6EwICgwl50OMtxiIIosUrqalhvldlYvSVc,2991
-agents/tracing/create.py,sha256=Gm9N5O2DeBy6UU86tRN0wnmzWyXb-qAUBbTj9oxIHao,18106
+agents/tracing/create.py,sha256=xpJ4ZRnGyUDPKoVVkA_8hmdhtwOKGhSkwRco2AQIhAo,18003
 agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
 agents/tracing/processor_interface.py,sha256=e1mWcIAoQFHID1BapcrAZ6MxZg98bPVYgbOPclVoCXc,1660
-agents/tracing/processors.py,sha256=lOdZHwo0rQAflVkKWOZinnWyLtS0stALyydiFOC0gss,11389
+agents/tracing/processors.py,sha256=hgFMnN9QP03UcIx6rkeaWa7rfPeVQ0K1rvUc7m84NVY,11370
 agents/tracing/provider.py,sha256=hiMTAiVnmnZ2RW6HYvL1hckXE-GQEqTSRvZCVcBY7pI,9212
 agents/tracing/scope.py,sha256=u17_m8RPpGvbHrTkaO_kDi5ROBWhfOAIgBe7suiaRD4,1445
 agents/tracing/setup.py,sha256=2h9TH1GAKcXKM1U99dOKKR3XlHp8JKzh2JG3DQPKyhY,612
 agents/tracing/span_data.py,sha256=nI2Fbu1ORE8ybE6m6RuddTJF5E5xFmEj8Mq5bSFv4bE,9017
 agents/tracing/spans.py,sha256=6vVzocGMsdgIma1ksqkBZmhar91xj4RpgcpUC3iibqg,6606
-agents/tracing/traces.py,sha256=G5LlECSK-DBRFP-bjT8maZjBQulz6SaHILYauUVlfq8,4775
+agents/tracing/traces.py,sha256=EU5KNlNOTC9GFBls5ONDA0FkaUdLrM6y-cLK5953kqE,4784
 agents/tracing/util.py,sha256=J7IZgVDmeW0aZDw8LBSjBKrlQbcOmaqZE7XQjolPwi8,490
 agents/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agents/util/_coro.py,sha256=S38XUYFC7bqTELSgMUBsAX1GoRlIrV7coupcUAWH__4,45
@@ -95,7 +97,7 @@ agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
 agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
 agents/voice/models/openai_stt.py,sha256=LcVDS7f1pmbm--PWX-IaV9uLg9uv5_L3vSCbVnTJeGs,16864
 agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
-openai_agents-0.2.3.dist-info/METADATA,sha256=XtY_daBoaLEGJdzn0ZX53F2vL78SYYTQTsTfZLgzOIM,11567
-openai_agents-0.2.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-openai_agents-0.2.3.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
-openai_agents-0.2.3.dist-info/RECORD,,
+openai_agents-0.2.4.dist-info/METADATA,sha256=EE1UoLLCNOh6Moihq8sUzWj0ACABNZiDOqYhdF8EcH4,11651
+openai_agents-0.2.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+openai_agents-0.2.4.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
+openai_agents-0.2.4.dist-info/RECORD,,