openai-agents 0.2.6__py3-none-any.whl → 0.2.8__py3-none-any.whl

This diff shows the contents of two publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in the public registry.
agents/agent.py CHANGED
@@ -223,6 +223,119 @@ class Agent(AgentBase, Generic[TContext]):
     """Whether to reset the tool choice to the default value after a tool has been called. Defaults
     to True. This ensures that the agent doesn't enter an infinite loop of tool usage."""
 
+    def __post_init__(self):
+        from typing import get_origin
+
+        if not isinstance(self.name, str):
+            raise TypeError(f"Agent name must be a string, got {type(self.name).__name__}")
+
+        if self.handoff_description is not None and not isinstance(self.handoff_description, str):
+            raise TypeError(
+                f"Agent handoff_description must be a string or None, "
+                f"got {type(self.handoff_description).__name__}"
+            )
+
+        if not isinstance(self.tools, list):
+            raise TypeError(f"Agent tools must be a list, got {type(self.tools).__name__}")
+
+        if not isinstance(self.mcp_servers, list):
+            raise TypeError(
+                f"Agent mcp_servers must be a list, got {type(self.mcp_servers).__name__}"
+            )
+
+        if not isinstance(self.mcp_config, dict):
+            raise TypeError(
+                f"Agent mcp_config must be a dict, got {type(self.mcp_config).__name__}"
+            )
+
+        if (
+            self.instructions is not None
+            and not isinstance(self.instructions, str)
+            and not callable(self.instructions)
+        ):
+            raise TypeError(
+                f"Agent instructions must be a string, callable, or None, "
+                f"got {type(self.instructions).__name__}"
+            )
+
+        if (
+            self.prompt is not None
+            and not callable(self.prompt)
+            and not hasattr(self.prompt, "get")
+        ):
+            raise TypeError(
+                f"Agent prompt must be a Prompt, DynamicPromptFunction, or None, "
+                f"got {type(self.prompt).__name__}"
+            )
+
+        if not isinstance(self.handoffs, list):
+            raise TypeError(f"Agent handoffs must be a list, got {type(self.handoffs).__name__}")
+
+        if self.model is not None and not isinstance(self.model, str):
+            from .models.interface import Model
+
+            if not isinstance(self.model, Model):
+                raise TypeError(
+                    f"Agent model must be a string, Model, or None, got {type(self.model).__name__}"
+                )
+
+        if not isinstance(self.model_settings, ModelSettings):
+            raise TypeError(
+                f"Agent model_settings must be a ModelSettings instance, "
+                f"got {type(self.model_settings).__name__}"
+            )
+
+        if not isinstance(self.input_guardrails, list):
+            raise TypeError(
+                f"Agent input_guardrails must be a list, got {type(self.input_guardrails).__name__}"
+            )
+
+        if not isinstance(self.output_guardrails, list):
+            raise TypeError(
+                f"Agent output_guardrails must be a list, "
+                f"got {type(self.output_guardrails).__name__}"
+            )
+
+        if self.output_type is not None:
+            from .agent_output import AgentOutputSchemaBase
+
+            if not (
+                isinstance(self.output_type, (type, AgentOutputSchemaBase))
+                or get_origin(self.output_type) is not None
+            ):
+                raise TypeError(
+                    f"Agent output_type must be a type, AgentOutputSchemaBase, or None, "
+                    f"got {type(self.output_type).__name__}"
+                )
+
+        if self.hooks is not None:
+            from .lifecycle import AgentHooksBase
+
+            if not isinstance(self.hooks, AgentHooksBase):
+                raise TypeError(
+                    f"Agent hooks must be an AgentHooks instance or None, "
+                    f"got {type(self.hooks).__name__}"
+                )
+
+        if (
+            not (
+                isinstance(self.tool_use_behavior, str)
+                and self.tool_use_behavior in ["run_llm_again", "stop_on_first_tool"]
+            )
+            and not isinstance(self.tool_use_behavior, dict)
+            and not callable(self.tool_use_behavior)
+        ):
+            raise TypeError(
+                f"Agent tool_use_behavior must be 'run_llm_again', 'stop_on_first_tool', "
+                f"StopAtTools dict, or callable, got {type(self.tool_use_behavior).__name__}"
+            )
+
+        if not isinstance(self.reset_tool_choice, bool):
+            raise TypeError(
+                f"Agent reset_tool_choice must be a boolean, "
+                f"got {type(self.reset_tool_choice).__name__}"
+            )
+
     def clone(self, **kwargs: Any) -> Agent[TContext]:
         """Make a copy of the agent, with the given arguments changed.
         Notes:
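
Taken together, these checks move configuration mistakes from run time to construction time: an invalid field now raises TypeError the moment the Agent is created. A minimal sketch of the effect from user code, assuming the public `agents` exports:

    from agents import Agent

    agent = Agent(name="support", instructions="Answer tersely.")  # passes validation

    try:
        Agent(name=123)  # wrong type on purpose
    except TypeError as err:
        print(err)  # -> Agent name must be a string, got int
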
@@ -280,16 +393,31 @@ class Agent(AgentBase, Generic[TContext]):
         return run_agent
 
     async def get_system_prompt(self, run_context: RunContextWrapper[TContext]) -> str | None:
-        """Get the system prompt for the agent."""
         if isinstance(self.instructions, str):
             return self.instructions
         elif callable(self.instructions):
+            # Inspect the signature of the instructions function
+            sig = inspect.signature(self.instructions)
+            params = list(sig.parameters.values())
+
+            # Enforce exactly 2 parameters
+            if len(params) != 2:
+                raise TypeError(
+                    f"'instructions' callable must accept exactly 2 arguments (context, agent), "
+                    f"but got {len(params)}: {[p.name for p in params]}"
+                )
+
+            # Call the instructions function properly
             if inspect.iscoroutinefunction(self.instructions):
                 return await cast(Awaitable[str], self.instructions(run_context, self))
             else:
                 return cast(str, self.instructions(run_context, self))
+
         elif self.instructions is not None:
-            logger.error(f"Instructions must be a string or a function, got {self.instructions}")
+            logger.error(
+                f"Instructions must be a string or a callable function, "
+                f"got {type(self.instructions).__name__}"
+            )
 
         return None
 
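
The signature check above means a dynamic-instructions callable must accept exactly two parameters, (context, agent); a one-argument lambda that previously failed opaquely when invoked now raises a descriptive TypeError. A short sketch, with illustrative names:

    from agents import Agent, RunContextWrapper

    def dynamic_instructions(context: RunContextWrapper, agent: Agent) -> str:
        # Receives the run context and the agent itself; returns the system prompt.
        return f"You are {agent.name}. Be concise."

    agent = Agent(name="helper", instructions=dynamic_instructions)
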
agents/items.py CHANGED
@@ -1,7 +1,6 @@
 from __future__ import annotations
 
 import abc
-import copy
 from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any, Generic, Literal, TypeVar, Union
 
@@ -277,7 +276,7 @@ class ItemHelpers:
                     "role": "user",
                 }
             ]
-        return copy.deepcopy(input)
+        return input.copy()
 
    @classmethod
    def text_message_outputs(cls, items: list[RunItem]) -> str:
agents/model_settings.py CHANGED
@@ -102,6 +102,10 @@ class ModelSettings:
     [reasoning models](https://platform.openai.com/docs/guides/reasoning).
     """
 
+    verbosity: Literal["low", "medium", "high"] | None = None
+    """Constrains the verbosity of the model's response.
+    """
+
     metadata: dict[str, str] | None = None
     """Metadata to include with the model response call."""
 
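
The next two hunks plumb the new field into the Chat Completions and Responses clients. A usage sketch, assuming the public `agents` exports:

    from agents import Agent, ModelSettings

    # verbosity accepts "low", "medium", or "high"; None leaves it unset.
    agent = Agent(
        name="summarizer",
        instructions="Summarize the input.",
        model_settings=ModelSettings(verbosity="low"),
    )
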
agents/models/openai_chatcompletions.py CHANGED
@@ -287,6 +287,7 @@ class OpenAIChatCompletionsModel(Model):
             stream_options=self._non_null_or_not_given(stream_options),
             store=self._non_null_or_not_given(store),
             reasoning_effort=self._non_null_or_not_given(reasoning_effort),
+            verbosity=self._non_null_or_not_given(model_settings.verbosity),
             top_logprobs=self._non_null_or_not_given(model_settings.top_logprobs),
             extra_headers={**HEADERS, **(model_settings.extra_headers or {})},
             extra_query=model_settings.extra_query,
agents/models/openai_responses.py CHANGED
@@ -270,6 +270,11 @@ class OpenAIResponsesModel(Model):
         extra_args = dict(model_settings.extra_args or {})
         if model_settings.top_logprobs is not None:
             extra_args["top_logprobs"] = model_settings.top_logprobs
+        if model_settings.verbosity is not None:
+            if response_format != NOT_GIVEN:
+                response_format["verbosity"] = model_settings.verbosity  # type: ignore [index]
+            else:
+                response_format = {"verbosity": model_settings.verbosity}
 
         return await self._client.responses.create(
             previous_response_id=self._non_null_or_not_given(previous_response_id),
agents/realtime/openai_realtime.py CHANGED
@@ -170,7 +170,10 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
             "OpenAI-Beta": "realtime=v1",
         }
         self._websocket = await websockets.connect(
-            url, user_agent_header=_USER_AGENT, additional_headers=headers
+            url,
+            user_agent_header=_USER_AGENT,
+            additional_headers=headers,
+            max_size=None,  # Allow any size of message
         )
         self._websocket_task = asyncio.create_task(self._listen_for_messages())
         await self._update_session_config(model_settings)
agents/realtime/session.py CHANGED
@@ -10,6 +10,7 @@ from typing_extensions import assert_never
 from ..agent import Agent
 from ..exceptions import ModelBehaviorError, UserError
 from ..handoffs import Handoff
+from ..logger import logger
 from ..run_context import RunContextWrapper, TContext
 from ..tool import FunctionTool
 from ..tool_context import ToolContext
@@ -33,7 +34,7 @@ from .events import (
     RealtimeToolStart,
 )
 from .handoffs import realtime_handoff
-from .items import InputAudio, InputText, RealtimeItem
+from .items import AssistantAudio, InputAudio, InputText, RealtimeItem
 from .model import RealtimeModel, RealtimeModelConfig, RealtimeModelListener
 from .model_events import (
     RealtimeModelEvent,
@@ -98,7 +99,7 @@ class RealtimeSession(RealtimeModelListener):
         self._stored_exception: Exception | None = None
 
         # Guardrails state tracking
-        self._interrupted_by_guardrail = False
+        self._interrupted_response_ids: set[str] = set()
         self._item_transcripts: dict[str, str] = {}  # item_id -> accumulated transcript
         self._item_guardrail_run_counts: dict[str, int] = {}  # item_id -> run count
         self._debounce_text_length = self._run_config.get("guardrails_settings", {}).get(
@@ -242,10 +243,62 @@ class RealtimeSession(RealtimeModelListener):
 
             if current_length >= next_run_threshold:
                 self._item_guardrail_run_counts[item_id] += 1
-                self._enqueue_guardrail_task(self._item_transcripts[item_id])
+                # Pass response_id so we can ensure only a single interrupt per response
+                self._enqueue_guardrail_task(self._item_transcripts[item_id], event.response_id)
         elif event.type == "item_updated":
             is_new = not any(item.item_id == event.item.item_id for item in self._history)
-            self._history = self._get_new_history(self._history, event.item)
+
+            # Preserve previously known transcripts when updating existing items.
+            # This prevents transcripts from disappearing when an item is later
+            # retrieved without transcript fields populated.
+            incoming_item = event.item
+            existing_item = next(
+                (i for i in self._history if i.item_id == incoming_item.item_id), None
+            )
+
+            if (
+                existing_item is not None
+                and existing_item.type == "message"
+                and incoming_item.type == "message"
+            ):
+                try:
+                    # Merge transcripts for matching content indices
+                    existing_content = existing_item.content
+                    new_content = []
+                    for idx, entry in enumerate(incoming_item.content):
+                        # Only attempt to preserve for audio-like content
+                        if entry.type in ("audio", "input_audio"):
+                            # Use tuple form for Python 3.9 compatibility
+                            assert isinstance(entry, (InputAudio, AssistantAudio))
+                            # Determine if transcript is missing/empty on the incoming entry
+                            entry_transcript = entry.transcript
+                            if not entry_transcript:
+                                preserved: str | None = None
+                                # First prefer any transcript from the existing history item
+                                if idx < len(existing_content):
+                                    this_content = existing_content[idx]
+                                    if isinstance(this_content, AssistantAudio) or isinstance(
+                                        this_content, InputAudio
+                                    ):
+                                        preserved = this_content.transcript
+
+                                # If still missing and this is an assistant item, fall back to
+                                # accumulated transcript deltas tracked during the turn.
+                                if not preserved and incoming_item.role == "assistant":
+                                    preserved = self._item_transcripts.get(incoming_item.item_id)
+
+                                if preserved:
+                                    entry = entry.model_copy(update={"transcript": preserved})
+
+                        new_content.append(entry)
+
+                    if new_content:
+                        incoming_item = incoming_item.model_copy(update={"content": new_content})
+                except Exception:
+                    logger.error("Error merging transcripts", exc_info=True)
+                    pass
+
+            self._history = self._get_new_history(self._history, incoming_item)
             if is_new:
                 new_item = next(
                     item for item in self._history if item.item_id == event.item.item_id
@@ -274,7 +327,6 @@ class RealtimeSession(RealtimeModelListener):
             # Clear guardrail state for next turn
             self._item_transcripts.clear()
             self._item_guardrail_run_counts.clear()
-            self._interrupted_by_guardrail = False
 
             await self._put_event(
                 RealtimeAgentEndEvent(
@@ -442,7 +494,7 @@ class RealtimeSession(RealtimeModelListener):
             # Otherwise, add it to the end
             return old_history + [event]
 
-    async def _run_output_guardrails(self, text: str) -> bool:
+    async def _run_output_guardrails(self, text: str, response_id: str) -> bool:
         """Run output guardrails on the given text. Returns True if any guardrail was triggered."""
         combined_guardrails = self._current_agent.output_guardrails + self._run_config.get(
             "output_guardrails", []
@@ -455,7 +507,8 @@ class RealtimeSession(RealtimeModelListener):
                 output_guardrails.append(guardrail)
                 seen_ids.add(guardrail_id)
 
-        if not output_guardrails or self._interrupted_by_guardrail:
+        # If we've already interrupted this response, skip
+        if not output_guardrails or response_id in self._interrupted_response_ids:
             return False
 
         triggered_results = []
@@ -475,8 +528,12 @@ class RealtimeSession(RealtimeModelListener):
                 continue
 
         if triggered_results:
-            # Mark as interrupted to prevent multiple interrupts
-            self._interrupted_by_guardrail = True
+            # Double-check: bail if already interrupted for this response
+            if response_id in self._interrupted_response_ids:
+                return False
+
+            # Mark as interrupted immediately (before any awaits) to minimize race window
+            self._interrupted_response_ids.add(response_id)
 
             # Emit guardrail tripped event
             await self._put_event(
@@ -502,10 +559,10 @@ class RealtimeSession(RealtimeModelListener):
 
         return False
 
-    def _enqueue_guardrail_task(self, text: str) -> None:
+    def _enqueue_guardrail_task(self, text: str, response_id: str) -> None:
         # Runs the guardrails in a separate task to avoid blocking the main loop
 
-        task = asyncio.create_task(self._run_output_guardrails(text))
+        task = asyncio.create_task(self._run_output_guardrails(text, response_id))
         self._guardrail_tasks.add(task)
 
         # Add callback to remove completed tasks and handle exceptions
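
The guardrail change across these hunks replaces one session-wide boolean with a set of interrupted response IDs, so each model response can be interrupted at most once while later responses stay eligible. An illustrative sketch of the pattern (not SDK API):

    import asyncio

    interrupted_response_ids: set[str] = set()

    async def maybe_interrupt(response_id: str) -> bool:
        # Skip if this response was already interrupted.
        if response_id in interrupted_response_ids:
            return False
        # Mark before any await so concurrent guardrail tasks see it immediately.
        interrupted_response_ids.add(response_id)
        await asyncio.sleep(0)  # stand-in for emitting the interrupt event
        return True
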
agents/run.py CHANGED
@@ -1,10 +1,9 @@
 from __future__ import annotations
 
 import asyncio
-import copy
 import inspect
 from dataclasses import dataclass, field
-from typing import Any, Generic, cast
+from typing import Any, Callable, Generic, cast
 
 from openai.types.responses import ResponseCompletedEvent
 from openai.types.responses.response_prompt_param import (
@@ -56,6 +55,7 @@ from .tracing import Span, SpanError, agent_span, get_current_trace, trace
 from .tracing.span_data import AgentSpanData
 from .usage import Usage
 from .util import _coro, _error_tracing
+from .util._types import MaybeAwaitable
 
 DEFAULT_MAX_TURNS = 10
 
@@ -81,6 +81,27 @@ def get_default_agent_runner() -> AgentRunner:
     return DEFAULT_AGENT_RUNNER
 
 
+@dataclass
+class ModelInputData:
+    """Container for the data that will be sent to the model."""
+
+    input: list[TResponseInputItem]
+    instructions: str | None
+
+
+@dataclass
+class CallModelData(Generic[TContext]):
+    """Data passed to `RunConfig.call_model_input_filter` prior to model call."""
+
+    model_data: ModelInputData
+    agent: Agent[TContext]
+    context: TContext | None
+
+
+# Type alias for the optional input filter callback
+CallModelInputFilter = Callable[[CallModelData[Any]], MaybeAwaitable[ModelInputData]]
+
+
 @dataclass
 class RunConfig:
     """Configures settings for the entire agent run."""
@@ -139,6 +160,16 @@ class RunConfig:
     An optional dictionary of additional metadata to include with the trace.
     """
 
+    call_model_input_filter: CallModelInputFilter | None = None
+    """
+    Optional callback that is invoked immediately before calling the model. It receives the current
+    agent, context and the model input (instructions and input items), and must return a possibly
+    modified `ModelInputData` to use for the model call.
+
+    This allows you to edit the input sent to the model e.g. to stay within a token limit.
+    For example, you can use this to add a system prompt to the input.
+    """
+
 
 class RunOptions(TypedDict, Generic[TContext]):
     """Arguments for ``AgentRunner`` methods."""
@@ -355,7 +386,7 @@ class AgentRunner:
             disabled=run_config.tracing_disabled,
         ):
             current_turn = 0
-            original_input: str | list[TResponseInputItem] = copy.deepcopy(prepared_input)
+            original_input: str | list[TResponseInputItem] = _copy_str_or_list(prepared_input)
             generated_items: list[RunItem] = []
             model_responses: list[ModelResponse] = []
 
@@ -414,7 +445,7 @@ class AgentRunner:
                         starting_agent,
                         starting_agent.input_guardrails
                         + (run_config.input_guardrails or []),
-                        copy.deepcopy(prepared_input),
+                        _copy_str_or_list(prepared_input),
                         context_wrapper,
                     ),
                     self._run_single_turn(
562
593
  )
563
594
 
564
595
  streamed_result = RunResultStreaming(
565
- input=copy.deepcopy(input),
596
+ input=_copy_str_or_list(input),
566
597
  new_items=[],
567
598
  current_agent=starting_agent,
568
599
  raw_responses=[],
@@ -593,6 +624,47 @@ class AgentRunner:
         )
         return streamed_result
 
+    @classmethod
+    async def _maybe_filter_model_input(
+        cls,
+        *,
+        agent: Agent[TContext],
+        run_config: RunConfig,
+        context_wrapper: RunContextWrapper[TContext],
+        input_items: list[TResponseInputItem],
+        system_instructions: str | None,
+    ) -> ModelInputData:
+        """Apply optional call_model_input_filter to modify model input.
+
+        Returns a `ModelInputData` that will be sent to the model.
+        """
+        effective_instructions = system_instructions
+        effective_input: list[TResponseInputItem] = input_items
+
+        if run_config.call_model_input_filter is None:
+            return ModelInputData(input=effective_input, instructions=effective_instructions)
+
+        try:
+            model_input = ModelInputData(
+                input=effective_input.copy(),
+                instructions=effective_instructions,
+            )
+            filter_payload: CallModelData[TContext] = CallModelData(
+                model_data=model_input,
+                agent=agent,
+                context=context_wrapper.context,
+            )
+            maybe_updated = run_config.call_model_input_filter(filter_payload)
+            updated = await maybe_updated if inspect.isawaitable(maybe_updated) else maybe_updated
+            if not isinstance(updated, ModelInputData):
+                raise UserError("call_model_input_filter must return a ModelInputData instance")
+            return updated
+        except Exception as e:
+            _error_tracing.attach_error_to_current_span(
+                SpanError(message="Error in call_model_input_filter", data={"error": str(e)})
+            )
+            raise
+
     @classmethod
     async def _run_input_guardrails_with_queue(
         cls,
@@ -713,7 +785,7 @@ class AgentRunner:
             cls._run_input_guardrails_with_queue(
                 starting_agent,
                 starting_agent.input_guardrails + (run_config.input_guardrails or []),
-                copy.deepcopy(ItemHelpers.input_to_new_input_list(prepared_input)),
+                ItemHelpers.input_to_new_input_list(prepared_input),
                 context_wrapper,
                 streamed_result,
                 current_span,
863
935
  input = ItemHelpers.input_to_new_input_list(streamed_result.input)
864
936
  input.extend([item.to_input_item() for item in streamed_result.new_items])
865
937
 
938
+ filtered = await cls._maybe_filter_model_input(
939
+ agent=agent,
940
+ run_config=run_config,
941
+ context_wrapper=context_wrapper,
942
+ input_items=input,
943
+ system_instructions=system_prompt,
944
+ )
945
+
866
946
  # 1. Stream the output events
867
947
  async for event in model.stream_response(
868
- system_prompt,
869
- input,
948
+ filtered.instructions,
949
+ filtered.input,
870
950
  model_settings,
871
951
  all_tools,
872
952
  output_schema,
@@ -1034,7 +1114,6 @@ class AgentRunner:
         run_config: RunConfig,
         tool_use_tracker: AgentToolUseTracker,
     ) -> SingleStepResult:
-
         original_input = streamed_result.input
         pre_step_items = streamed_result.new_items
         event_queue = streamed_result._event_queue
@@ -1161,13 +1240,22 @@ class AgentRunner:
         previous_response_id: str | None,
         prompt_config: ResponsePromptParam | None,
     ) -> ModelResponse:
+        # Allow user to modify model input right before the call, if configured
+        filtered = await cls._maybe_filter_model_input(
+            agent=agent,
+            run_config=run_config,
+            context_wrapper=context_wrapper,
+            input_items=input,
+            system_instructions=system_prompt,
+        )
+
         model = cls._get_model(agent, run_config)
         model_settings = agent.model_settings.resolve(run_config.model_settings)
         model_settings = RunImpl.maybe_reset_tool_choice(agent, tool_use_tracker, model_settings)
 
         new_response = await model.get_response(
-            system_instructions=system_prompt,
-            input=input,
+            system_instructions=filtered.instructions,
+            input=filtered.input,
             model_settings=model_settings,
             tools=all_tools,
             output_schema=output_schema,
@@ -1287,3 +1375,9 @@ class AgentRunner:
 
 
 DEFAULT_AGENT_RUNNER = AgentRunner()
+
+
+def _copy_str_or_list(input: str | list[TResponseInputItem]) -> str | list[TResponseInputItem]:
+    if isinstance(input, str):
+        return input
+    return input.copy()
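
Note the trade this helper encodes: unlike the `copy.deepcopy` calls it replaces, it only guards against list-level mutation; the item dicts inside remain shared. An illustrative sketch of the semantics:

    items = [{"role": "user", "content": "hi"}]
    copied = _copy_str_or_list(items)

    copied.append({"role": "user", "content": "more"})
    assert len(items) == 1          # the caller's list is unchanged
    assert copied[0] is items[0]    # but elements are still shared, not deep-copied
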
openai_agents-0.2.8.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.2.6
+Version: 0.2.8
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://openai.github.io/openai-agents-python/
 Project-URL: Repository, https://github.com/openai/openai-agents-python
openai_agents-0.2.8.dist-info/RECORD CHANGED
@@ -2,22 +2,22 @@ agents/__init__.py,sha256=YXcfllpLrUjafU_5KwIZvVEdUzcjZYhatqCS5tb03UQ,7908
 agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
 agents/_debug.py,sha256=7OKys2lDjeCtGggTkM53m_8vw0WIr3yt-_JPBDAnsw0,608
 agents/_run_impl.py,sha256=8Bc8YIHzv8Qf40tUAcHV5qqUkGSUxSraNkV0Y5xLFFQ,44894
-agents/agent.py,sha256=zBhC_bL5WuAmXAHJTj_ZgN5Nxj8jq8vZspdX8B0do38,12648
+agents/agent.py,sha256=jn_nV38eVLK3QYh7dUmKO1AocQOCCPaHEERaSVt0l8g,17574
 agents/agent_output.py,sha256=teTFK8unUN3esXhmEBO0bQGYQm1Axd5rYleDt9TFDgw,7153
 agents/computer.py,sha256=XD44UgiUWSfniv-xKwwDP6wFKVwBiZkpaL1hO-0-7ZA,2516
 agents/exceptions.py,sha256=NHMdHE0cZ6AdA6UgUylTzVHAX05Ol1CkO814a0FdZcs,2862
 agents/function_schema.py,sha256=yZ3PEOmfy836Me_W4QlItMeFq2j4BtpuI2FmQswbIcQ,13590
 agents/guardrail.py,sha256=7P-kd9rKPhgB8rtI31MCV5ho4ZrEaNCQxHvE8IK3EOk,9582
 agents/handoffs.py,sha256=31-rQ-iMWlWNd93ivgTTSMGkqlariXrNfWI_udMWt7s,11409
-agents/items.py,sha256=ntrJ-HuqSMC8HtIwS9pcqHYXtiQ2TJB6lHR-bcvNn4c,9848
+agents/items.py,sha256=aHo7KTXZLBcHSrKHWDaBB6L7XmBCAIekG5e0xOIhkyM,9828
 agents/lifecycle.py,sha256=sJwESHBHbml7rSYH360-P6x1bLyENcQWm4bT4rQcbuo,3129
 agents/logger.py,sha256=p_ef7vWKpBev5FFybPJjhrCCQizK08Yy1A2EDO1SNNg,60
-agents/model_settings.py,sha256=SKCrfV5A7u0zaY8fh2PZRe08W5sEhArHC3YHpEfeip0,6357
+agents/model_settings.py,sha256=7zGEGxfXtRHlst9qYngYJc5mkr2l_mi5YuQDGiQ-qXM,6485
 agents/prompts.py,sha256=Ss5y_7s2HFcRAOAKu4WTxQszs5ybI8TfbxgEYdnj9sg,2231
 agents/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 agents/repl.py,sha256=FKZlkGfw6QxItTkjFkCAQwXuV_pn69DIamGd3PiKQFk,2361
 agents/result.py,sha256=YCGYHoc5X1_vLKu5QiK6F8C1ZXI3tTfLXaZoqbYgUMA,10753
-agents/run.py,sha256=Q0UcLVjlmWjpEvXpWm-0obDU5Gu5T9eJ7xW29wW-QEA,52453
+agents/run.py,sha256=lPp-nZWKA0gg0E2ace94zwFo-FmQK7Fj_cXcrvAoFlQ,55978
 agents/run_context.py,sha256=vuSUQM8O4CLensQY27-22fOqECnw7yvwL9U3WO8b_bk,851
 agents/stream_events.py,sha256=VFyTu-DT3ZMnHLtMbg-X_lxec0doQxNfx-hVxLB0BpI,1700
 agents/strict_schema.py,sha256=_KuEJkglmq-Fj3HSeYP4WqTvqrxbSKu6gezfz5Brhh0,5775
@@ -45,9 +45,9 @@ agents/models/chatcmpl_stream_handler.py,sha256=XUoMnNEcSqK6IRMI6GPH8CwMCXi6Nhbf
 agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
 agents/models/interface.py,sha256=TpY_GEk3LLMozCcYAEcC-Y_VRpI3pwE7A7ZM317mk7M,3839
 agents/models/multi_provider.py,sha256=aiDbls5G4YomPfN6qH1pGlj41WS5jlDp2T82zm6qcnM,5578
-agents/models/openai_chatcompletions.py,sha256=N_8U_rKRhB1pgMju29bOok1QFWF_EL7JoatlKzy7hLY,13102
+agents/models/openai_chatcompletions.py,sha256=lJJZCdWiZ0jTUp77OD1Zs6tSLZ7k8v1j_D2gB2Nw12Y,13179
 agents/models/openai_provider.py,sha256=NMxTNaoTa329GrA7jj51LC02pb_e2eFh-PCvWADJrkY,3478
-agents/models/openai_responses.py,sha256=pBAHIwz_kq561bmzqMwz6L4dFd_R4V5C7R21xLBM__o,17048
+agents/models/openai_responses.py,sha256=BnlN9hH6J4LKWBuM0lDfhvRgAb8IjQJuk5Hfd3OJ8G0,17330
 agents/realtime/README.md,sha256=5YCYXH5ULmlWoWo1PE9TlbHjeYgjnp-xY8ZssSFY2Vk,126
 agents/realtime/__init__.py,sha256=7qvzK8QJuHRnPHxDgDj21v8-lnSN4Uurg9znwJv_Tqg,4923
 agents/realtime/_default_tracker.py,sha256=4OMxBvD1MnZmMn6JZYKL42uWhVzvK6NdDLDfPP54d78,1765
@@ -60,9 +60,9 @@ agents/realtime/items.py,sha256=psT6AH65qmngmPsgwk6CXacVo5tEDYq0Za3EitHFpTA,5052
 agents/realtime/model.py,sha256=RJBA8-Dkd2JTqGzbKacoX4dN_qTWn_p7npL73To3ymw,6143
 agents/realtime/model_events.py,sha256=X7UrUU_g4u5gWaf2mUesJJ-Ik1Z1QE0Z-ZP7kDmX1t0,4034
 agents/realtime/model_inputs.py,sha256=OW2bn3wD5_pXLunDUf35jhG2q_bTKbC_D7Qu-83aOEA,2243
-agents/realtime/openai_realtime.py,sha256=vgzgklFcRpB9ZfsDda7DtXlBn3NF6bZdysta1DwQhrM,30120
+agents/realtime/openai_realtime.py,sha256=20yHhG-KAGeXY0M0ucty0wpRqXSUVLvoL5cs663NGrI,30201
 agents/realtime/runner.py,sha256=KfU7utmc9QFH2htIKN2IN9H-5EnB0qN9ezmvlRTnOm4,2511
-agents/realtime/session.py,sha256=yMHFhqhBKDHzlK-k6JTuqXKggMPW3dPt-aavqDoKsec,23375
+agents/realtime/session.py,sha256=aGifsl_4LYGZBwXneaOo-H_46fdQs-CAtQD6DmJY2Uo,26560
 agents/tracing/__init__.py,sha256=5HO_6na5S6EwICgwl50OMtxiIIosUrqalhvldlYvSVc,2991
 agents/tracing/create.py,sha256=xpJ4ZRnGyUDPKoVVkA_8hmdhtwOKGhSkwRco2AQIhAo,18003
 agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
@@ -97,7 +97,7 @@ agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
 agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
 agents/voice/models/openai_stt.py,sha256=LcVDS7f1pmbm--PWX-IaV9uLg9uv5_L3vSCbVnTJeGs,16864
 agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
-openai_agents-0.2.6.dist-info/METADATA,sha256=E_Fnl2A-qaNEFT07zAH1lx7zIj-XVh7Wli5P5NhfjR0,12104
-openai_agents-0.2.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-openai_agents-0.2.6.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
-openai_agents-0.2.6.dist-info/RECORD,,
+openai_agents-0.2.8.dist-info/METADATA,sha256=MqNemwQZlvUlnyqVHU1Bxor4liYOsBa4VcE_UJllEas,12104
+openai_agents-0.2.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+openai_agents-0.2.8.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
+openai_agents-0.2.8.dist-info/RECORD,,