openai-agents 0.4.0__py3-none-any.whl → 0.4.2__py3-none-any.whl

This diff shows the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.


This version of openai-agents might be problematic.

agents/_run_impl.py CHANGED
@@ -1172,6 +1172,8 @@ class RunImpl:
  event = RunItemStreamEvent(item=item, name="reasoning_item_created")
  elif isinstance(item, MCPApprovalRequestItem):
  event = RunItemStreamEvent(item=item, name="mcp_approval_requested")
+ elif isinstance(item, MCPApprovalResponseItem):
+ event = RunItemStreamEvent(item=item, name="mcp_approval_response")
  elif isinstance(item, MCPListToolsItem):
  event = RunItemStreamEvent(item=item, name="mcp_list_tools")
agents/extensions/memory/sqlalchemy_session.py CHANGED
@@ -319,3 +319,16 @@ class SQLAlchemySession(SessionABC):
  await sess.execute(
  delete(self._sessions).where(self._sessions.c.session_id == self.session_id)
  )
+
+ @property
+ def engine(self) -> AsyncEngine:
+ """Access the underlying SQLAlchemy AsyncEngine.
+
+ This property provides direct access to the engine for advanced use cases,
+ such as checking connection pool status, configuring engine settings,
+ or manually disposing the engine when needed.
+
+ Returns:
+ AsyncEngine: The SQLAlchemy async engine instance.
+ """
+ return self._engine
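
A minimal usage sketch for the new engine property. The from_url constructor, the in-memory SQLite URL, and the item payload below are illustrative assumptions, not part of this diff:

import asyncio

from agents.extensions.memory import SQLAlchemySession


async def main() -> None:
    # Assumed setup: adjust the session id and database URL to your environment.
    session = SQLAlchemySession.from_url(
        "conversation-123",
        url="sqlite+aiosqlite:///:memory:",
        create_tables=True,
    )
    try:
        await session.add_items([{"role": "user", "content": "hello"}])
    finally:
        # New in 0.4.2: direct access to the underlying AsyncEngine,
        # e.g. to inspect the pool or dispose of it on shutdown.
        await session.engine.dispose()


asyncio.run(main())
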
agents/extensions/models/litellm_model.py CHANGED
@@ -44,6 +44,7 @@ from ...models.chatcmpl_helpers import HEADERS, HEADERS_OVERRIDE
  from ...models.chatcmpl_stream_handler import ChatCmplStreamHandler
  from ...models.fake_id import FAKE_RESPONSES_ID
  from ...models.interface import Model, ModelTracing
+ from ...models.openai_responses import Converter as OpenAIResponsesConverter
  from ...tool import Tool
  from ...tracing import generation_span
  from ...tracing.span_data import GenerationSpanData
@@ -109,18 +110,26 @@ class LitellmModel(Model):
  prompt=prompt,
  )

- assert isinstance(response.choices[0], litellm.types.utils.Choices)
+ message: litellm.types.utils.Message | None = None
+ first_choice: litellm.types.utils.Choices | None = None
+ if response.choices and len(response.choices) > 0:
+ choice = response.choices[0]
+ if isinstance(choice, litellm.types.utils.Choices):
+ first_choice = choice
+ message = first_choice.message

  if _debug.DONT_LOG_MODEL_DATA:
  logger.debug("Received model response")
  else:
- logger.debug(
- f"""LLM resp:\n{
- json.dumps(
- response.choices[0].message.model_dump(), indent=2, ensure_ascii=False
- )
- }\n"""
- )
+ if message is not None:
+ logger.debug(
+ f"""LLM resp:\n{
+ json.dumps(message.model_dump(), indent=2, ensure_ascii=False)
+ }\n"""
+ )
+ else:
+ finish_reason = first_choice.finish_reason if first_choice else "-"
+ logger.debug(f"LLM resp had no message. finish_reason: {finish_reason}")

  if hasattr(response, "usage"):
  response_usage = response.usage
@@ -151,14 +160,20 @@ class LitellmModel(Model):
  logger.warning("No usage information returned from Litellm")

  if tracing.include_data():
- span_generation.span_data.output = [response.choices[0].message.model_dump()]
+ span_generation.span_data.output = (
+ [message.model_dump()] if message is not None else []
+ )
  span_generation.span_data.usage = {
  "input_tokens": usage.input_tokens,
  "output_tokens": usage.output_tokens,
  }

- items = Converter.message_to_output_items(
- LitellmConverter.convert_message_to_openai(response.choices[0].message)
+ items = (
+ Converter.message_to_output_items(
+ LitellmConverter.convert_message_to_openai(message)
+ )
+ if message is not None
+ else []
  )

  return ModelResponse(
@@ -269,7 +284,7 @@ class LitellmModel(Model):
  )

  # Fix for interleaved thinking bug: reorder messages to ensure tool_use comes before tool_result # noqa: E501
- if preserve_thinking_blocks:
+ if "anthropic" in self.model.lower() or "claude" in self.model.lower():
  converted_messages = self._fix_tool_message_ordering(converted_messages)

  if system_instructions:
@@ -325,6 +340,23 @@ class LitellmModel(Model):
  )

  reasoning_effort = model_settings.reasoning.effort if model_settings.reasoning else None
+ # Enable developers to pass non-OpenAI compatible reasoning_effort data like "none"
+ # Priority order:
+ # 1. model_settings.reasoning.effort
+ # 2. model_settings.extra_body["reasoning_effort"]
+ # 3. model_settings.extra_args["reasoning_effort"]
+ if (
+ reasoning_effort is None  # Unset in model_settings
+ and isinstance(model_settings.extra_body, dict)
+ and "reasoning_effort" in model_settings.extra_body
+ ):
+ reasoning_effort = model_settings.extra_body["reasoning_effort"]
+ if (
+ reasoning_effort is None  # Unset in both model_settings and model_settings.extra_body
+ and model_settings.extra_args
+ and "reasoning_effort" in model_settings.extra_args
+ ):
+ reasoning_effort = model_settings.extra_args["reasoning_effort"]

  stream_options = None
  if stream and model_settings.include_usage is not None:
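
For illustration, this lets a caller route a provider-specific reasoning_effort value such as "none" through ModelSettings.extra_body instead of ModelSettings.reasoning. A hedged sketch; the agent wiring and model name below are assumptions, not part of this diff:

from agents import Agent, ModelSettings
from agents.extensions.models.litellm_model import LitellmModel

# Illustrative model name; any LiteLLM-routed model string would do.
agent = Agent(
    name="Assistant",
    instructions="Answer briefly.",
    model=LitellmModel(model="gemini/gemini-2.5-flash"),
    model_settings=ModelSettings(
        # Consulted by LitellmModel in 0.4.2 when ModelSettings.reasoning is unset;
        # extra_args["reasoning_effort"] is the next fallback.
        extra_body={"reasoning_effort": "none"},
    ),
)
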
@@ -342,6 +374,9 @@ class LitellmModel(Model):
  if model_settings.extra_args:
  extra_kwargs.update(model_settings.extra_args)

+ # Prevent duplicate reasoning_effort kwargs when it was promoted to a top-level argument.
+ extra_kwargs.pop("reasoning_effort", None)
+
  ret = await litellm.acompletion(
  model=self.model,
  messages=converted_messages,
@@ -367,15 +402,19 @@ class LitellmModel(Model):
  if isinstance(ret, litellm.types.utils.ModelResponse):
  return ret

+ responses_tool_choice = OpenAIResponsesConverter.convert_tool_choice(
+ model_settings.tool_choice
+ )
+ if responses_tool_choice is None or responses_tool_choice is omit:
+ responses_tool_choice = "auto"
+
  response = Response(
  id=FAKE_RESPONSES_ID,
  created_at=time.time(),
  model=self.model,
  object="response",
  output=[],
- tool_choice=cast(Literal["auto", "required", "none"], tool_choice)
- if tool_choice is not omit
- else "auto",
+ tool_choice=responses_tool_choice,  # type: ignore[arg-type]
  top_p=model_settings.top_p,
  temperature=model_settings.temperature,
  tools=[],
agents/items.py CHANGED
@@ -361,6 +361,9 @@ class ItemHelpers:
  if isinstance(output, (ToolOutputText, ToolOutputImage, ToolOutputFileContent)):
  return output
  elif isinstance(output, dict):
+ # Require explicit 'type' field in dict to be considered a structured output
+ if "type" not in output:
+ return None
  try:
  return ValidToolOutputPydanticModelsTypeAdapter.validate_python(output)
  except pydantic.ValidationError:
agents/models/chatcmpl_stream_handler.py CHANGED
@@ -150,6 +150,12 @@ class ChatCmplStreamHandler:
  )

  if reasoning_content and state.reasoning_content_index_and_output:
+ # Ensure summary list has at least one element
+ if not state.reasoning_content_index_and_output[1].summary:
+ state.reasoning_content_index_and_output[1].summary = [
+ Summary(text="", type="summary_text")
+ ]
+
  yield ResponseReasoningSummaryTextDeltaEvent(
  delta=reasoning_content,
  item_id=FAKE_RESPONSES_ID,
@@ -201,7 +207,7 @@ class ChatCmplStreamHandler:
  )

  # Create a new summary with updated text
- if state.reasoning_content_index_and_output[1].content is None:
+ if not state.reasoning_content_index_and_output[1].content:
  state.reasoning_content_index_and_output[1].content = [
  Content(text="", type="reasoning_text")
  ]
agents/realtime/config.py CHANGED
@@ -184,6 +184,9 @@ class RealtimeRunConfig(TypedDict):
  tracing_disabled: NotRequired[bool]
  """Whether tracing is disabled for this run."""

+ async_tool_calls: NotRequired[bool]
+ """Whether function tool calls should run asynchronously. Defaults to True."""
+
  # TODO (rm) Add history audio storage config
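
A minimal sketch of opting back into inline tool calls through the run config. It assumes RealtimeRunner accepts a RealtimeRunConfig mapping via its config parameter; the agent definition is illustrative:

from agents.realtime import RealtimeAgent, RealtimeRunner

agent = RealtimeAgent(
    name="Assistant",
    instructions="Answer the caller's questions.",
)

# async_tool_calls is new in 0.4.2 and defaults to True; setting it to False
# restores the previous behavior of awaiting each function tool call inline.
runner = RealtimeRunner(
    agent,
    config={"async_tool_calls": False},
)
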
agents/realtime/model_inputs.py CHANGED
@@ -95,6 +95,9 @@ class RealtimeModelSendToolOutput:
  class RealtimeModelSendInterrupt:
  """Send an interrupt to the model."""

+ force_response_cancel: bool = False
+ """Force sending a response.cancel event even if automatic cancellation is enabled."""
+

  @dataclass
  class RealtimeModelSendSessionUpdate:
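
For illustration, application code holding a RealtimeModel reference could force the cancel like this (a sketch only; the session code later in this diff does the same when a guardrail trips):

from agents.realtime.model import RealtimeModel
from agents.realtime.model_inputs import RealtimeModelSendInterrupt


async def force_interrupt(model: RealtimeModel) -> None:
    # Forces a response.cancel even when automatic response cancellation
    # (turn_detection.interrupt_response) is enabled for the session.
    await model.send_event(RealtimeModelSendInterrupt(force_response_cancel=True))
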
agents/realtime/openai_realtime.py CHANGED
@@ -266,7 +266,8 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
  async def _emit_event(self, event: RealtimeModelEvent) -> None:
  """Emit an event to the listeners."""
- for listener in self._listeners:
+ # Copy list to avoid modification during iteration
+ for listener in list(self._listeners):
  await listener.on_event(event)

  async def _listen_for_messages(self):
@@ -394,6 +395,7 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
  current_item_id = playback_state.get("current_item_id")
  current_item_content_index = playback_state.get("current_item_content_index")
  elapsed_ms = playback_state.get("elapsed_ms")
+
  if current_item_id is None or elapsed_ms is None:
  logger.debug(
  "Skipping interrupt. "
@@ -401,29 +403,28 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
  f"elapsed ms: {elapsed_ms}, "
  f"content index: {current_item_content_index}"
  )
- return
-
- current_item_content_index = current_item_content_index or 0
- if elapsed_ms > 0:
- await self._emit_event(
- RealtimeModelAudioInterruptedEvent(
- item_id=current_item_id,
- content_index=current_item_content_index,
- )
- )
- converted = _ConversionHelper.convert_interrupt(
- current_item_id,
- current_item_content_index,
- int(elapsed_ms),
- )
- await self._send_raw_message(converted)
  else:
- logger.debug(
- "Didn't interrupt bc elapsed ms is < 0. "
- f"Item id: {current_item_id}, "
- f"elapsed ms: {elapsed_ms}, "
- f"content index: {current_item_content_index}"
- )
+ current_item_content_index = current_item_content_index or 0
+ if elapsed_ms > 0:
+ await self._emit_event(
+ RealtimeModelAudioInterruptedEvent(
+ item_id=current_item_id,
+ content_index=current_item_content_index,
+ )
+ )
+ converted = _ConversionHelper.convert_interrupt(
+ current_item_id,
+ current_item_content_index,
+ int(elapsed_ms),
+ )
+ await self._send_raw_message(converted)
+ else:
+ logger.debug(
+ "Didn't interrupt bc elapsed ms is < 0. "
+ f"Item id: {current_item_id}, "
+ f"elapsed ms: {elapsed_ms}, "
+ f"content index: {current_item_content_index}"
+ )

  session = self._created_session
  automatic_response_cancellation_enabled = (
@@ -431,14 +432,18 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
  and session.audio is not None
  and session.audio.input is not None
  and session.audio.input.turn_detection is not None
- and session.audio.input.turn_detection.interrupt_response is True,
+ and session.audio.input.turn_detection.interrupt_response is True
  )
- if not automatic_response_cancellation_enabled:
+ should_cancel_response = event.force_response_cancel or (
+ not automatic_response_cancellation_enabled
+ )
+ if should_cancel_response:
  await self._cancel_response()

- self._audio_state_tracker.on_interrupted()
- if self._playback_tracker:
- self._playback_tracker.on_interrupted()
+ if current_item_id is not None and elapsed_ms is not None:
+ self._audio_state_tracker.on_interrupted()
+ if self._playback_tracker:
+ self._playback_tracker.on_interrupted()

  async def _send_session_update(self, event: RealtimeModelSendSessionUpdate) -> None:
  """Send a session update to the model."""
@@ -516,6 +521,10 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
  self._websocket = None
  if self._websocket_task:
  self._websocket_task.cancel()
+ try:
+ await self._websocket_task
+ except asyncio.CancelledError:
+ pass
  self._websocket_task = None

  async def _cancel_response(self) -> None:
@@ -616,7 +625,7 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
  and session.audio is not None
  and session.audio.input is not None
  and session.audio.input.turn_detection is not None
- and session.audio.input.turn_detection.interrupt_response is True,
+ and session.audio.input.turn_detection.interrupt_response is True
  )
  if not automatic_response_cancellation_enabled:
  await self._cancel_response()
agents/realtime/session.py CHANGED
@@ -112,7 +112,7 @@ class RealtimeSession(RealtimeModelListener):
  }
  self._event_queue: asyncio.Queue[RealtimeSessionEvent] = asyncio.Queue()
  self._closed = False
- self._stored_exception: Exception | None = None
+ self._stored_exception: BaseException | None = None

  # Guardrails state tracking
  self._interrupted_response_ids: set[str] = set()
@@ -123,6 +123,8 @@ class RealtimeSession(RealtimeModelListener):
  )

  self._guardrail_tasks: set[asyncio.Task[Any]] = set()
+ self._tool_call_tasks: set[asyncio.Task[Any]] = set()
+ self._async_tool_calls: bool = bool(self._run_config.get("async_tool_calls", True))

  @property
  def model(self) -> RealtimeModel:
@@ -216,7 +218,11 @@ class RealtimeSession(RealtimeModelListener):
  if event.type == "error":
  await self._put_event(RealtimeError(info=self._event_info, error=event.error))
  elif event.type == "function_call":
- await self._handle_tool_call(event)
+ agent_snapshot = self._current_agent
+ if self._async_tool_calls:
+ self._enqueue_tool_call_task(event, agent_snapshot)
+ else:
+ await self._handle_tool_call(event, agent_snapshot=agent_snapshot)
  elif event.type == "audio":
  await self._put_event(
  RealtimeAudio(
@@ -384,11 +390,17 @@ class RealtimeSession(RealtimeModelListener):
  """Put an event into the queue."""
  await self._event_queue.put(event)

- async def _handle_tool_call(self, event: RealtimeModelToolCallEvent) -> None:
+ async def _handle_tool_call(
+ self,
+ event: RealtimeModelToolCallEvent,
+ *,
+ agent_snapshot: RealtimeAgent | None = None,
+ ) -> None:
  """Handle a tool call event."""
+ agent = agent_snapshot or self._current_agent
  tools, handoffs = await asyncio.gather(
- self._current_agent.get_all_tools(self._context_wrapper),
- self._get_handoffs(self._current_agent, self._context_wrapper),
+ agent.get_all_tools(self._context_wrapper),
+ self._get_handoffs(agent, self._context_wrapper),
  )
  function_map = {tool.name: tool for tool in tools if isinstance(tool, FunctionTool)}
  handoff_map = {handoff.tool_name: handoff for handoff in handoffs}
@@ -398,7 +410,7 @@ class RealtimeSession(RealtimeModelListener):
  RealtimeToolStart(
  info=self._event_info,
  tool=function_map[event.name],
- agent=self._current_agent,
+ agent=agent,
  )
  )
@@ -423,7 +435,7 @@ class RealtimeSession(RealtimeModelListener):
  info=self._event_info,
  tool=func_tool,
  output=result,
- agent=self._current_agent,
+ agent=agent,
  )
  )
  elif event.name in handoff_map:
@@ -444,7 +456,7 @@ class RealtimeSession(RealtimeModelListener):
  )

  # Store previous agent for event
- previous_agent = self._current_agent
+ previous_agent = agent

  # Update current agent
  self._current_agent = result
@@ -704,7 +716,7 @@ class RealtimeSession(RealtimeModelListener):
  )

  # Interrupt the model
- await self._model.send_event(RealtimeModelSendInterrupt())
+ await self._model.send_event(RealtimeModelSendInterrupt(force_response_cancel=True))

  # Send guardrail triggered message
  guardrail_names = [result.guardrail.get_name() for result in triggered_results]
@@ -752,10 +764,49 @@ class RealtimeSession(RealtimeModelListener):
  task.cancel()
  self._guardrail_tasks.clear()

+ def _enqueue_tool_call_task(
+ self, event: RealtimeModelToolCallEvent, agent_snapshot: RealtimeAgent
+ ) -> None:
+ """Run tool calls in the background to avoid blocking realtime transport."""
+ task = asyncio.create_task(self._handle_tool_call(event, agent_snapshot=agent_snapshot))
+ self._tool_call_tasks.add(task)
+ task.add_done_callback(self._on_tool_call_task_done)
+
+ def _on_tool_call_task_done(self, task: asyncio.Task[Any]) -> None:
+ self._tool_call_tasks.discard(task)
+
+ if task.cancelled():
+ return
+
+ exception = task.exception()
+ if exception is None:
+ return
+
+ logger.exception("Realtime tool call task failed", exc_info=exception)
+
+ if self._stored_exception is None:
+ self._stored_exception = exception
+
+ asyncio.create_task(
+ self._put_event(
+ RealtimeError(
+ info=self._event_info,
+ error={"message": f"Tool call task failed: {exception}"},
+ )
+ )
+ )
+
+ def _cleanup_tool_call_tasks(self) -> None:
+ for task in self._tool_call_tasks:
+ if not task.done():
+ task.cancel()
+ self._tool_call_tasks.clear()
+
  async def _cleanup(self) -> None:
  """Clean up all resources and mark session as closed."""
  # Cancel and cleanup guardrail tasks
  self._cleanup_guardrail_tasks()
+ self._cleanup_tool_call_tasks()

  # Remove ourselves as a listener
  self._model.remove_listener(self)
agents/run.py CHANGED
@@ -1138,6 +1138,15 @@ class AgentRunner:
  streamed_result.is_complete = True
  finally:
+ if streamed_result._input_guardrails_task:
+ try:
+ await AgentRunner._input_guardrail_tripwire_triggered_for_stream(
+ streamed_result
+ )
+ except Exception as e:
+ logger.debug(
+ f"Error in streamed_result finalize for agent {current_agent.name} - {e}"
+ )
  if current_span:
  current_span.finish(reset_current=True)
  if streamed_result.trace:
agents/stream_events.py CHANGED
@@ -37,6 +37,7 @@ class RunItemStreamEvent:
  "tool_output",
  "reasoning_item_created",
  "mcp_approval_requested",
+ "mcp_approval_response",
  "mcp_list_tools",
  ]
  """The name of the event."""
agents/tool.py CHANGED
@@ -15,7 +15,7 @@ from openai.types.responses.response_output_item import LocalShellCall, McpAppro
  from openai.types.responses.tool_param import CodeInterpreter, ImageGeneration, Mcp
  from openai.types.responses.web_search_tool import Filters as WebSearchToolFilters
  from openai.types.responses.web_search_tool_param import UserLocation
- from pydantic import BaseModel, TypeAdapter, ValidationError
+ from pydantic import BaseModel, TypeAdapter, ValidationError, model_validator
  from typing_extensions import Concatenate, NotRequired, ParamSpec, TypedDict

  from . import _debug
@@ -75,6 +75,13 @@ class ToolOutputImage(BaseModel):
  file_id: str | None = None
  detail: Literal["low", "high", "auto"] | None = None

+ @model_validator(mode="after")
+ def check_at_least_one_required_field(self) -> ToolOutputImage:
+ """Validate that at least one of image_url or file_id is provided."""
+ if self.image_url is None and self.file_id is None:
+ raise ValueError("At least one of image_url or file_id must be provided")
+ return self
+

  class ToolOutputImageDict(TypedDict, total=False):
  """TypedDict variant for image tool outputs."""
@@ -98,6 +105,13 @@ class ToolOutputFileContent(BaseModel):
  file_id: str | None = None
  filename: str | None = None

+ @model_validator(mode="after")
+ def check_at_least_one_required_field(self) -> ToolOutputFileContent:
+ """Validate that at least one of file_data, file_url, or file_id is provided."""
+ if self.file_data is None and self.file_url is None and self.file_id is None:
+ raise ValueError("At least one of file_data, file_url, or file_id must be provided")
+ return self
+

  class ToolOutputFileContentDict(TypedDict, total=False):
  """TypedDict variant for file content tool outputs."""
agents/voice/models/openai_stt.py CHANGED
@@ -122,7 +122,8 @@ class OpenAISTTTranscriptionSession(StreamedTranscriptionSession):
  return

  if self._tracing_span:
- if self._trace_include_sensitive_audio_data:
+ # Only encode audio if tracing is enabled AND buffer is not empty
+ if self._trace_include_sensitive_audio_data and self._turn_audio_buffer:
  self._tracing_span.span_data.input = _audio_to_base64(self._turn_audio_buffer)

  self._tracing_span.span_data.input_format = "pcm"
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: openai-agents
- Version: 0.4.0
+ Version: 0.4.2
  Summary: OpenAI Agents SDK
  Project-URL: Homepage, https://openai.github.io/openai-agents-python/
  Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -44,7 +44,7 @@ Requires-Dist: numpy<3,>=2.2.0; (python_version >= '3.10') and extra == 'voice'
  Requires-Dist: websockets<16,>=15.0; extra == 'voice'
  Description-Content-Type: text/markdown

- # OpenAI Agents SDK
+ # OpenAI Agents SDK [![PyPI](https://img.shields.io/pypi/v/openai-agents?label=pypi%20package)](https://pypi.org/project/openai-agents/)

  The OpenAI Agents SDK is a lightweight yet powerful framework for building multi-agent workflows. It is provider-agnostic, supporting the OpenAI Responses and Chat Completions APIs, as well as 100+ other LLMs.
@@ -1,7 +1,7 @@
  agents/__init__.py,sha256=qdaXm0t_NP4B78ODns3J9FAy4AurgDcQGNkXf9y_pL8,9036
  agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
  agents/_debug.py,sha256=dRe2dUlA9bCLp6f8bAdiX7JfGyJuHyS_DRdW0kZshl0,856
- agents/_run_impl.py,sha256=WXxiCS_GMYhY8Z3SjxVOgcEk3YgtjdoM3pr6FxJXUi8,55174
+ agents/_run_impl.py,sha256=aGPdjlTYg2lcJaorxx8rCtVvlMzfpXyfBb7PqggF44Q,55318
  agents/agent.py,sha256=P5AzwKz3FiQJjzfautF0R9JzxkTXEeItcEkJgn8z5mM,19832
  agents/agent_output.py,sha256=teTFK8unUN3esXhmEBO0bQGYQm1Axd5rYleDt9TFDgw,7153
  agents/computer.py,sha256=XD44UgiUWSfniv-xKwwDP6wFKVwBiZkpaL1hO-0-7ZA,2516
@@ -9,7 +9,7 @@ agents/exceptions.py,sha256=roJsYttB5i7FQlzRQNg8QSVdALZFz5u7kUeVvJdaitE,4156
  agents/function_schema.py,sha256=njtbLt44DOkIU0a0U8TeDNEx-iQZU8oohwy3k7-k4A8,14855
  agents/guardrail.py,sha256=7P-kd9rKPhgB8rtI31MCV5ho4ZrEaNCQxHvE8IK3EOk,9582
  agents/handoffs.py,sha256=kDTM3nj3E_0khiJPMJAIN00gektMTRNbaYSbc5ZCnBM,11411
- agents/items.py,sha256=9-it0ZFCy_igRtpBIk7BTebdvK1G_Ygt16B24gFvN5w,14216
+ agents/items.py,sha256=YoAhxwb2PSgClGGWrkTIWufAxg0F1cS1KohLYonwz6I,14370
  agents/lifecycle.py,sha256=hGsqzumOSaal6oAjTqTfvBXl-ShAOkC42sthJigB5Fg,4308
  agents/logger.py,sha256=p_ef7vWKpBev5FFybPJjhrCCQizK08Yy1A2EDO1SNNg,60
  agents/model_settings.py,sha256=7Ul-Xg-aNVXIbK6V4Rm2t5EEfNR0tsy_A9ac_wFqLLk,6828
@@ -17,11 +17,11 @@ agents/prompts.py,sha256=Ss5y_7s2HFcRAOAKu4WTxQszs5ybI8TfbxgEYdnj9sg,2231
  agents/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
  agents/repl.py,sha256=NX0BE5YDnmGQ2rdQsmLm3CKkQZ5m4GC95xXmUsAXJVs,2539
  agents/result.py,sha256=FW3-fsYOIJrn7pjiDjWPHN58pPpYfNoFNTympFV_96k,13963
- agents/run.py,sha256=z0SKQx2jk4nHeWkBII2ZDpEY4dxm6Ce6Ud8I9GN7N9A,74508
+ agents/run.py,sha256=I3KwURXqqQwDYF3gCN9KmpIpxakJhwIj6NnYvxvvd5I,74925
  agents/run_context.py,sha256=vuSUQM8O4CLensQY27-22fOqECnw7yvwL9U3WO8b_bk,851
- agents/stream_events.py,sha256=VFyTu-DT3ZMnHLtMbg-X_lxec0doQxNfx-hVxLB0BpI,1700
+ agents/stream_events.py,sha256=vW7O5T6iwFuRFvds1Bq3zMB60fRCz7lWryMkHSL-bAo,1733
  agents/strict_schema.py,sha256=HFm4j753-UKDfJ0zSiQYf5V1qGuHY6TRm2zzwI0f0E0,6382
- agents/tool.py,sha256=H-IytTUTna7qTQbfraRystrgKeYU5Cc1zFKAeXqwO5A,19537
+ agents/tool.py,sha256=yNCqv4gu1g0Gxrt9YPYdSkZaWlO3d84vlEwxOegVRng,20304
  agents/tool_context.py,sha256=g53mgaeX7kCwPaIReiwuUejD8qC7QejMS-F3Wnkuhhg,1866
  agents/tool_guardrails.py,sha256=2uXEr_R5AWy9NHtBjd7G7upc3uZSuoP86Hfsc-qTadM,8344
  agents/usage.py,sha256=Tb5udGd3DPgD0JBdRD8fDctTE4M-zKML5uRn8ZG1yBc,1675
@@ -34,9 +34,9 @@ agents/extensions/memory/__init__.py,sha256=jhx0pa1TVRFooiI0ERRhklkaeiZ-NDGA_b9t
  agents/extensions/memory/advanced_sqlite_session.py,sha256=rCrXM878foAuBN-rN2fibP2GHs-1hTtRx-TQcDKIfGI,52883
  agents/extensions/memory/encrypt_session.py,sha256=PVnZIEj50bjUq16OLnMKrbZiinLkrVpamPPEw8RnUCA,6485
  agents/extensions/memory/redis_session.py,sha256=JwXY6zUTMgq9bRezlyFZ4Tze7DO7T0hioTc23qjSHjU,9838
- agents/extensions/memory/sqlalchemy_session.py,sha256=LlZ5Gx1zR9cRGwM__LkQCHzK7ItT9hfACo16hCQWlQU,11879
+ agents/extensions/memory/sqlalchemy_session.py,sha256=fnlZkNF_XZekP44uhiR4rjlCkwG7JJEiFm35TJfiCtc,12325
  agents/extensions/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- agents/extensions/models/litellm_model.py,sha256=D4dbbOHHgdBPADAEYHlFhma9645r3gN9U7gg4713mhw,23653
+ agents/extensions/models/litellm_model.py,sha256=hbQFhAeEF5eHdyu5Q-7HNYFEhmn0KFK2KAcfYA10vqc,25621
  agents/extensions/models/litellm_provider.py,sha256=ZHgh1nMoEvA7NpawkzLh3JDuDFtwXUV94Rs7UrwWqAk,1083
  agents/mcp/__init__.py,sha256=yHmmYlrmEHzUas1inRLKL2iPqbb_-107G3gKe_tyg4I,750
  agents/mcp/server.py,sha256=cby0KKKKRhuWCydr4llERPL72Z94uV-SV3LLAcgcWTk,28435
@@ -50,7 +50,7 @@ agents/models/__init__.py,sha256=E0XVqWayVAsFqxucDLBW30siaqfNQsVrAnfidG_C3ok,287
  agents/models/_openai_shared.py,sha256=4Ngwo2Fv2RXY61Pqck1cYPkSln2tDnb8Ai-ao4QG-iE,836
  agents/models/chatcmpl_converter.py,sha256=qEobLnIJjrK6WRi_tsVkrDrGq78EGro3MZXlVMpMK2c,26011
  agents/models/chatcmpl_helpers.py,sha256=YC2krp_-uBgRCrCEImLjNvONTWRWfwLlPKHI4kBmNXE,1483
- agents/models/chatcmpl_stream_handler.py,sha256=r8nc-4hJg1plw87y24MD48O23xnfC_2gHKowtOYgO3M,28896
+ agents/models/chatcmpl_stream_handler.py,sha256=1h0esxmnlBk9NwDjjwSlWYzjzuMgIpMLtRU9kaszfyg,29212
  agents/models/default_models.py,sha256=mlvBePn8H4UkHo7lN-wh7A3k2ciLgBUFKpROQxzdTfs,2098
  agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
  agents/models/interface.py,sha256=-AFUHC8iRuGZmtQwguDw4s-M4OPL2y2mct4TAmWvVrU,4057
@@ -64,16 +64,16 @@ agents/realtime/_default_tracker.py,sha256=4OMxBvD1MnZmMn6JZYKL42uWhVzvK6NdDLDfP
  agents/realtime/_util.py,sha256=ehBzUN1RTD2m2TXq73Jm4WohQzJ6y_MfnF5MaK8uu14,341
  agents/realtime/agent.py,sha256=bkegBJ_lc3z3NtnlIyEkVZFxZWBJwVjsQVzpQZAu7PM,4283
  agents/realtime/audio_formats.py,sha256=DBUWVVff4XY5BT6Mol86tF4PFMp5OIS3LmAbqUmQn_k,1019
- agents/realtime/config.py,sha256=ud0GK8ZbcnKRC4oGZNwpsiZI8TZ1OdTSMADfFtM8Z6I,6948
+ agents/realtime/config.py,sha256=vnjgkeZXcOSLFopoAiGj4Vki_75pEJIKTagJtQpCWmg,7072
  agents/realtime/events.py,sha256=eANiNNyYlp_1Ybdl-MOwXRVTDtrK9hfgn6iw0xNxnaY,5889
  agents/realtime/handoffs.py,sha256=iJ4lr5RVdDkw5W3_AOGB_Az-hlRt1CoFFFNFDfd3ues,6698
  agents/realtime/items.py,sha256=5EG768FkKpbk-dhe4b_7BfFpdUEFWtxoiVUtNI9KXsc,5517
  agents/realtime/model.py,sha256=Lnb9pEcvnlIdXJUcldVyioaX5lpmrBou5FZoNJe4XfA,6457
  agents/realtime/model_events.py,sha256=2NKofzLszKHwtlcsogsNnH6hdeFfO7S96yWDB4AlxB8,4340
- agents/realtime/model_inputs.py,sha256=gRas0-ohirmGbCMWc8tHTo-e3ZPcPn7TK9BauCK9ynA,2657
- agents/realtime/openai_realtime.py,sha256=x3dLSax3DF-hbQDSPXUtvHalN3nlwwcXYBIa36_ZqNo,44307
+ agents/realtime/model_inputs.py,sha256=-pl8Oj0WVrA5Gt-dqP5Va3ZHqXyIXpsjMsf9UL-suEY,2789
+ agents/realtime/openai_realtime.py,sha256=jN3OvcEQt9X-59t6InllkOOEd8Tdw69K5vuKfXBeObg,44763
  agents/realtime/runner.py,sha256=KfU7utmc9QFH2htIKN2IN9H-5EnB0qN9ezmvlRTnOm4,2511
- agents/realtime/session.py,sha256=e4fJ3E5lS_y5IfczPAnX81vHr5rvEzJbT1LsmVdW7lc,35199
+ agents/realtime/session.py,sha256=79WqKWwGOsutQRLs7fDsijE-OxEJjGm-aOpjL5F7Fn8,36983
  agents/tracing/__init__.py,sha256=5HO_6na5S6EwICgwl50OMtxiIIosUrqalhvldlYvSVc,2991
  agents/tracing/create.py,sha256=xpJ4ZRnGyUDPKoVVkA_8hmdhtwOKGhSkwRco2AQIhAo,18003
  agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
@@ -106,9 +106,9 @@ agents/voice/utils.py,sha256=MrRomVqBLXeMAOue-Itwh0Fc5HjB0QCMKXclqFPhrbI,1309
  agents/voice/workflow.py,sha256=m_-_4qU1gEE5gcGahiE2IrIimmRW2X1rR20zZEGivSc,3795
  agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
- agents/voice/models/openai_stt.py,sha256=eZ0dmX_uDywpR1H3Q2N5jrV7NK3bR9l2a1InWM3yegk,17151
+ agents/voice/models/openai_stt.py,sha256=Lb_F9160VNKDHXZ9zylSzeig7sB8lBjiYhQLDZsp6NQ,17257
  agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
- openai_agents-0.4.0.dist-info/METADATA,sha256=oz5c_NXJgwN1WWVSpsQDb9MYB9tz7TZA1v2lqySphmM,12929
- openai_agents-0.4.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- openai_agents-0.4.0.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
- openai_agents-0.4.0.dist-info/RECORD,,
+ openai_agents-0.4.2.dist-info/METADATA,sha256=UUyVoFXNYwTLrBnkpo7MFwT73-kJH0rQX53xwF3pFXw,13046
+ openai_agents-0.4.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ openai_agents-0.4.2.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
+ openai_agents-0.4.2.dist-info/RECORD,,