openai-agents 0.2.9__py3-none-any.whl → 0.2.11__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.

@@ -136,6 +136,7 @@ class _InputAudioBufferTimeoutTriggeredEvent(BaseModel):
     audio_end_ms: int
     item_id: str
 
+
 AllRealtimeServerEvents = Annotated[
     Union[
         OpenAIRealtimeServerEvent,
@@ -144,6 +145,15 @@ AllRealtimeServerEvents = Annotated[
     Field(discriminator="type"),
 ]
 
+ServerEventTypeAdapter: TypeAdapter[AllRealtimeServerEvents] | None = None
+
+
+def get_server_event_type_adapter() -> TypeAdapter[AllRealtimeServerEvents]:
+    global ServerEventTypeAdapter
+    if not ServerEventTypeAdapter:
+        ServerEventTypeAdapter = TypeAdapter(AllRealtimeServerEvents)
+    return ServerEventTypeAdapter
+
 
 class OpenAIRealtimeWebSocketModel(RealtimeModel):
     """A model that uses OpenAI's WebSocket API."""
@@ -159,6 +169,7 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
         self._tracing_config: RealtimeModelTracingConfig | Literal["auto"] | None = None
         self._playback_tracker: RealtimePlaybackTracker | None = None
         self._created_session: OpenAISessionObject | None = None
+        self._server_event_type_adapter = get_server_event_type_adapter()
 
     async def connect(self, options: RealtimeModelConfig) -> None:
         """Establish a connection to the model and keep it alive."""
@@ -177,15 +188,23 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
         else:
             self._tracing_config = "auto"
 
-        if not api_key:
-            raise UserError("API key is required but was not provided.")
-
         url = options.get("url", f"wss://api.openai.com/v1/realtime?model={self.model}")
 
-        headers = {
-            "Authorization": f"Bearer {api_key}",
-            "OpenAI-Beta": "realtime=v1",
-        }
+        headers: dict[str, str] = {}
+        if options.get("headers") is not None:
+            # For customizing request headers
+            headers.update(options["headers"])
+        else:
+            # OpenAI's Realtime API
+            if not api_key:
+                raise UserError("API key is required but was not provided.")
+
+            headers.update(
+                {
+                    "Authorization": f"Bearer {api_key}",
+                    "OpenAI-Beta": "realtime=v1",
+                }
+            )
         self._websocket = await websockets.connect(
             url,
             user_agent_header=_USER_AGENT,
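
Note: connect() now accepts caller-supplied headers through the options dict; when a "headers" entry is present, the API-key check and the default Authorization / OpenAI-Beta headers are skipped entirely, which enables proxies and gateways that do their own authentication. A hedged usage sketch (the endpoint and header values are hypothetical):

    # Inside an async function; the gateway injects OpenAI credentials server-side.
    model = OpenAIRealtimeWebSocketModel()
    await model.connect(
        {
            "url": "wss://gateway.internal.example/v1/realtime?model=gpt-realtime",
            "headers": {"X-Gateway-Token": "secret-token"},  # replaces the default headers
        }
    )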
@@ -479,9 +498,7 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
         try:
             if "previous_item_id" in event and event["previous_item_id"] is None:
                 event["previous_item_id"] = ""  # TODO (rm) remove
-            parsed: AllRealtimeServerEvents = TypeAdapter(
-                AllRealtimeServerEvents
-            ).validate_python(event)
+            parsed: AllRealtimeServerEvents = self._server_event_type_adapter.validate_python(event)
         except pydantic.ValidationError as e:
             logger.error(f"Failed to validate server event: {event}", exc_info=True)
             await self._emit_event(
@@ -572,11 +589,13 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
         ):
             await self._handle_output_item(parsed.item)
         elif parsed.type == "input_audio_buffer.timeout_triggered":
-            await self._emit_event(RealtimeModelInputAudioTimeoutTriggeredEvent(
-                item_id=parsed.item_id,
-                audio_start_ms=parsed.audio_start_ms,
-                audio_end_ms=parsed.audio_end_ms,
-            ))
+            await self._emit_event(
+                RealtimeModelInputAudioTimeoutTriggeredEvent(
+                    item_id=parsed.item_id,
+                    audio_start_ms=parsed.audio_start_ms,
+                    audio_end_ms=parsed.audio_end_ms,
+                )
+            )
 
     def _update_created_session(self, session: OpenAISessionObject) -> None:
         self._created_session = session
agents/run.py CHANGED
@@ -2,10 +2,14 @@ from __future__ import annotations
 
 import asyncio
 import inspect
+import os
 from dataclasses import dataclass, field
-from typing import Any, Callable, Generic, cast
+from typing import Any, Callable, Generic, cast, get_args
 
-from openai.types.responses import ResponseCompletedEvent
+from openai.types.responses import (
+    ResponseCompletedEvent,
+    ResponseOutputItemDoneEvent,
+)
 from openai.types.responses.response_prompt_param import (
     ResponsePromptParam,
 )
@@ -40,7 +44,14 @@ from .guardrail import (
     OutputGuardrailResult,
 )
 from .handoffs import Handoff, HandoffInputFilter, handoff
-from .items import ItemHelpers, ModelResponse, RunItem, TResponseInputItem
+from .items import (
+    ItemHelpers,
+    ModelResponse,
+    RunItem,
+    ToolCallItem,
+    ToolCallItemTypes,
+    TResponseInputItem,
+)
 from .lifecycle import RunHooks
 from .logger import logger
 from .memory import Session
@@ -49,7 +60,7 @@ from .models.interface import Model, ModelProvider
 from .models.multi_provider import MultiProvider
 from .result import RunResult, RunResultStreaming
 from .run_context import RunContextWrapper, TContext
-from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent
+from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent, RunItemStreamEvent
 from .tool import Tool
 from .tracing import Span, SpanError, agent_span, get_current_trace, trace
 from .tracing.span_data import AgentSpanData
@@ -81,6 +92,12 @@ def get_default_agent_runner() -> AgentRunner:
     return DEFAULT_AGENT_RUNNER
 
 
+def _default_trace_include_sensitive_data() -> bool:
+    """Returns the default value for trace_include_sensitive_data based on environment variable."""
+    val = os.getenv("OPENAI_AGENTS_TRACE_INCLUDE_SENSITIVE_DATA", "true")
+    return val.strip().lower() in ("1", "true", "yes", "on")
+
+
 @dataclass
 class ModelInputData:
     """Container for the data that will be sent to the model."""
@@ -135,7 +152,9 @@ class RunConfig:
     """Whether tracing is disabled for the agent run. If disabled, we will not trace the agent run.
     """
 
-    trace_include_sensitive_data: bool = True
+    trace_include_sensitive_data: bool = field(
+        default_factory=_default_trace_include_sensitive_data
+    )
     """Whether we include potentially sensitive data (for example: inputs/outputs of tool calls or
     LLM generations) in traces. If False, we'll still create spans for these events, but the
     sensitive data will not be included.
@@ -189,6 +208,9 @@ class RunOptions(TypedDict, Generic[TContext]):
     previous_response_id: NotRequired[str | None]
     """The ID of the previous response, if any."""
 
+    conversation_id: NotRequired[str | None]
+    """The ID of the stored conversation, if any."""
+
     session: NotRequired[Session | None]
     """The session for the run."""
 
@@ -205,6 +227,7 @@ class Runner:
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
         previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         session: Session | None = None,
     ) -> RunResult:
         """Run a workflow starting at the given agent. The agent will run in a loop until a final
@@ -229,6 +252,13 @@ class Runner:
             run_config: Global settings for the entire agent run.
             previous_response_id: The ID of the previous response, if using OpenAI models via the
                 Responses API, this allows you to skip passing in input from the previous turn.
+            conversation_id: The conversation ID (https://platform.openai.com/docs/guides/conversation-state?api-mode=responses).
+                If provided, the conversation will be used to read and write items.
+                Every agent will have access to the conversation history so far,
+                and its output items will be written to the conversation.
+                We recommend only using this if you are exclusively using OpenAI models;
+                other model providers don't write to the Conversation object,
+                so you'll end up having partial conversations stored.
         Returns:
             A run result containing all the inputs, guardrail results and the output of the last
             agent. Agents may perform handoffs, so we don't know the specific type of the output.
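
Note: a hedged sketch of the new parameter (the agent and conversation ID are hypothetical); history then lives in the server-side Conversation object rather than being threaded through local input:

    from agents import Agent, Runner  # assumed public imports

    agent = Agent(name="Assistant", instructions="Be concise.")

    # Inside an async function:
    result = await Runner.run(
        agent,
        input="What did we decide yesterday?",
        conversation_id="conv_123abc",  # hypothetical ID of a stored conversation
    )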
@@ -242,6 +272,7 @@ class Runner:
             hooks=hooks,
             run_config=run_config,
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             session=session,
         )
 
@@ -256,6 +287,7 @@ class Runner:
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
         previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         session: Session | None = None,
     ) -> RunResult:
         """Run a workflow synchronously, starting at the given agent. Note that this just wraps the
@@ -283,6 +315,7 @@ class Runner:
             run_config: Global settings for the entire agent run.
             previous_response_id: The ID of the previous response, if using OpenAI models via the
                 Responses API, this allows you to skip passing in input from the previous turn.
+            conversation_id: The ID of the stored conversation, if any.
         Returns:
             A run result containing all the inputs, guardrail results and the output of the last
             agent. Agents may perform handoffs, so we don't know the specific type of the output.
@@ -296,6 +329,7 @@ class Runner:
             hooks=hooks,
             run_config=run_config,
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             session=session,
         )
 
@@ -309,6 +343,7 @@ class Runner:
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
         previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         session: Session | None = None,
     ) -> RunResultStreaming:
         """Run a workflow starting at the given agent in streaming mode. The returned result object
@@ -334,6 +369,7 @@ class Runner:
             run_config: Global settings for the entire agent run.
             previous_response_id: The ID of the previous response, if using OpenAI models via the
                 Responses API, this allows you to skip passing in input from the previous turn.
+            conversation_id: The ID of the stored conversation, if any.
         Returns:
             A result object that contains data about the run, as well as a method to stream events.
         """
@@ -346,6 +382,7 @@ class Runner:
             hooks=hooks,
             run_config=run_config,
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             session=session,
         )
 
@@ -367,6 +404,7 @@ class AgentRunner:
         hooks = kwargs.get("hooks")
         run_config = kwargs.get("run_config")
         previous_response_id = kwargs.get("previous_response_id")
+        conversation_id = kwargs.get("conversation_id")
         session = kwargs.get("session")
         if hooks is None:
             hooks = RunHooks[Any]()
@@ -459,6 +497,7 @@ class AgentRunner:
                             should_run_agent_start_hooks=should_run_agent_start_hooks,
                             tool_use_tracker=tool_use_tracker,
                             previous_response_id=previous_response_id,
+                            conversation_id=conversation_id,
                         ),
                     )
                 else:
@@ -473,6 +512,7 @@ class AgentRunner:
                         should_run_agent_start_hooks=should_run_agent_start_hooks,
                         tool_use_tracker=tool_use_tracker,
                         previous_response_id=previous_response_id,
+                        conversation_id=conversation_id,
                     )
                     should_run_agent_start_hooks = False
 
@@ -539,6 +579,7 @@ class AgentRunner:
         hooks = kwargs.get("hooks")
         run_config = kwargs.get("run_config")
         previous_response_id = kwargs.get("previous_response_id")
+        conversation_id = kwargs.get("conversation_id")
         session = kwargs.get("session")
 
         return asyncio.get_event_loop().run_until_complete(
@@ -551,6 +592,7 @@ class AgentRunner:
                 hooks=hooks,
                 run_config=run_config,
                 previous_response_id=previous_response_id,
+                conversation_id=conversation_id,
             )
         )
 
@@ -565,6 +607,7 @@ class AgentRunner:
         hooks = kwargs.get("hooks")
         run_config = kwargs.get("run_config")
         previous_response_id = kwargs.get("previous_response_id")
+        conversation_id = kwargs.get("conversation_id")
         session = kwargs.get("session")
 
         if hooks is None:
@@ -619,6 +662,7 @@ class AgentRunner:
                 context_wrapper=context_wrapper,
                 run_config=run_config,
                 previous_response_id=previous_response_id,
+                conversation_id=conversation_id,
                 session=session,
             )
         )
@@ -719,6 +763,7 @@ class AgentRunner:
         context_wrapper: RunContextWrapper[TContext],
         run_config: RunConfig,
         previous_response_id: str | None,
+        conversation_id: str | None,
         session: Session | None,
     ):
         if streamed_result.trace:
@@ -802,6 +847,7 @@ class AgentRunner:
                         tool_use_tracker,
                         all_tools,
                         previous_response_id,
+                        conversation_id,
                     )
                     should_run_agent_start_hooks = False
 
@@ -904,7 +950,10 @@ class AgentRunner:
         tool_use_tracker: AgentToolUseTracker,
         all_tools: list[Tool],
         previous_response_id: str | None,
+        conversation_id: str | None,
     ) -> SingleStepResult:
+        emitted_tool_call_ids: set[str] = set()
+
         if should_run_agent_start_hooks:
             await asyncio.gather(
                 hooks.on_agent_start(context_wrapper, agent),
@@ -945,10 +994,16 @@ class AgentRunner:
         )
 
         # Call hook just before the model is invoked, with the correct system_prompt.
-        if agent.hooks:
-            await agent.hooks.on_llm_start(
-                context_wrapper, agent, filtered.instructions, filtered.input
-            )
+        await asyncio.gather(
+            hooks.on_llm_start(context_wrapper, agent, filtered.instructions, filtered.input),
+            (
+                agent.hooks.on_llm_start(
+                    context_wrapper, agent, filtered.instructions, filtered.input
+                )
+                if agent.hooks
+                else _coro.noop_coroutine()
+            ),
+        )
 
         # 1. Stream the output events
         async for event in model.stream_response(
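
Note: run-level hooks now fire alongside agent-level hooks, and the two are awaited concurrently with asyncio.gather; _coro.noop_coroutine() stands in when the agent defines no hooks, so gather always receives two awaitables. The pattern in isolation (the noop is re-implemented here for illustration):

    import asyncio

    async def _noop() -> None:
        # Stand-in for _coro.noop_coroutine(): an awaitable that does nothing.
        return None

    async def run_level_hook() -> None:
        print("run-level hook fired")

    agent_level_hook = None  # e.g. agent.hooks is unset

    async def main() -> None:
        await asyncio.gather(
            run_level_hook(),
            agent_level_hook() if agent_level_hook else _noop(),
        )

    asyncio.run(main())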
@@ -962,6 +1017,7 @@ class AgentRunner:
                 run_config.tracing_disabled, run_config.trace_include_sensitive_data
             ),
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             prompt=prompt_config,
         ):
             if isinstance(event, ResponseCompletedEvent):
@@ -984,20 +1040,47 @@ class AgentRunner:
                 )
                 context_wrapper.usage.add(usage)
 
+            if isinstance(event, ResponseOutputItemDoneEvent):
+                output_item = event.item
+
+                if isinstance(output_item, _TOOL_CALL_TYPES):
+                    call_id: str | None = getattr(
+                        output_item, "call_id", getattr(output_item, "id", None)
+                    )
+
+                    if call_id and call_id not in emitted_tool_call_ids:
+                        emitted_tool_call_ids.add(call_id)
+
+                        tool_item = ToolCallItem(
+                            raw_item=cast(ToolCallItemTypes, output_item),
+                            agent=agent,
+                        )
+                        streamed_result._event_queue.put_nowait(
+                            RunItemStreamEvent(item=tool_item, name="tool_called")
+                        )
+
             streamed_result._event_queue.put_nowait(RawResponsesStreamEvent(data=event))
 
         # Call hook just after the model response is finalized.
-        if agent.hooks and final_response is not None:
-            await agent.hooks.on_llm_end(context_wrapper, agent, final_response)
+        if final_response is not None:
+            await asyncio.gather(
+                (
+                    agent.hooks.on_llm_end(context_wrapper, agent, final_response)
+                    if agent.hooks
+                    else _coro.noop_coroutine()
+                ),
+                hooks.on_llm_end(context_wrapper, agent, final_response),
+            )
 
         # 2. At this point, the streaming is complete for this turn of the agent loop.
         if not final_response:
             raise ModelBehaviorError("Model did not produce a final response!")
 
         # 3. Now, we can process the turn as we do in the non-streaming case
-        return await cls._get_single_step_result_from_streamed_response(
+        single_step_result = await cls._get_single_step_result_from_response(
             agent=agent,
-            streamed_result=streamed_result,
+            original_input=streamed_result.input,
+            pre_step_items=streamed_result.new_items,
             new_response=final_response,
             output_schema=output_schema,
             all_tools=all_tools,
@@ -1008,6 +1091,34 @@ class AgentRunner:
             tool_use_tracker=tool_use_tracker,
         )
 
+        if emitted_tool_call_ids:
+            import dataclasses as _dc
+
+            filtered_items = [
+                item
+                for item in single_step_result.new_step_items
+                if not (
+                    isinstance(item, ToolCallItem)
+                    and (
+                        call_id := getattr(
+                            item.raw_item, "call_id", getattr(item.raw_item, "id", None)
+                        )
+                    )
+                    and call_id in emitted_tool_call_ids
+                )
+            ]
+
+            single_step_result_filtered = _dc.replace(
+                single_step_result, new_step_items=filtered_items
+            )
+
+            RunImpl.stream_step_result_to_queue(
+                single_step_result_filtered, streamed_result._event_queue
+            )
+        else:
+            RunImpl.stream_step_result_to_queue(single_step_result, streamed_result._event_queue)
+        return single_step_result
+
     @classmethod
     async def _run_single_turn(
         cls,
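
Note: the streaming path now emits a tool_called RunItemStreamEvent as soon as each tool call's output item completes, then filters those same items (matched by call_id, falling back to id) out of the step result before it is streamed, so consumers see each tool call exactly once instead of twice. The getattr fallback plus set-based dedup in miniature (the item shape is hypothetical):

    class FakeRawItem:
        def __init__(self, call_id=None, id=None):
            if call_id is not None:
                self.call_id = call_id
            if id is not None:
                self.id = id

    emitted: set[str] = set()
    items = [FakeRawItem(call_id="call_1"), FakeRawItem(id="fc_2"), FakeRawItem(call_id="call_1")]

    for item in items:
        # Prefer call_id; fall back to id for item types that lack call_id.
        cid = getattr(item, "call_id", getattr(item, "id", None))
        if cid and cid not in emitted:
            emitted.add(cid)  # first sighting: this is where the event would be emitted

    assert emitted == {"call_1", "fc_2"}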
@@ -1022,6 +1133,7 @@ class AgentRunner:
         should_run_agent_start_hooks: bool,
         tool_use_tracker: AgentToolUseTracker,
         previous_response_id: str | None,
+        conversation_id: str | None,
     ) -> SingleStepResult:
         # Ensure we run the hooks before anything else
         if should_run_agent_start_hooks:
@@ -1051,10 +1163,12 @@ class AgentRunner:
             output_schema,
             all_tools,
             handoffs,
+            hooks,
             context_wrapper,
             run_config,
             tool_use_tracker,
             previous_response_id,
+            conversation_id,
             prompt_config,
         )
 
@@ -1245,10 +1359,12 @@ class AgentRunner:
         output_schema: AgentOutputSchemaBase | None,
         all_tools: list[Tool],
         handoffs: list[Handoff],
+        hooks: RunHooks[TContext],
         context_wrapper: RunContextWrapper[TContext],
         run_config: RunConfig,
         tool_use_tracker: AgentToolUseTracker,
         previous_response_id: str | None,
+        conversation_id: str | None,
         prompt_config: ResponsePromptParam | None,
     ) -> ModelResponse:
         # Allow user to modify model input right before the call, if configured
@@ -1263,14 +1379,21 @@ class AgentRunner:
         model = cls._get_model(agent, run_config)
         model_settings = agent.model_settings.resolve(run_config.model_settings)
         model_settings = RunImpl.maybe_reset_tool_choice(agent, tool_use_tracker, model_settings)
-        # If the agent has hooks, we need to call them before and after the LLM call
-        if agent.hooks:
-            await agent.hooks.on_llm_start(
-                context_wrapper,
-                agent,
-                filtered.instructions,  # Use filtered instructions
-                filtered.input,  # Use filtered input
-            )
+
+        # If we have run hooks, or if the agent has hooks, we need to call them before the LLM call
+        await asyncio.gather(
+            hooks.on_llm_start(context_wrapper, agent, filtered.instructions, filtered.input),
+            (
+                agent.hooks.on_llm_start(
+                    context_wrapper,
+                    agent,
+                    filtered.instructions,  # Use filtered instructions
+                    filtered.input,  # Use filtered input
+                )
+                if agent.hooks
+                else _coro.noop_coroutine()
+            ),
+        )
 
         new_response = await model.get_response(
             system_instructions=filtered.instructions,
@@ -1283,14 +1406,22 @@ class AgentRunner:
                 run_config.tracing_disabled, run_config.trace_include_sensitive_data
             ),
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             prompt=prompt_config,
         )
-        # If the agent has hooks, we need to call them after the LLM call
-        if agent.hooks:
-            await agent.hooks.on_llm_end(context_wrapper, agent, new_response)
 
         context_wrapper.usage.add(new_response.usage)
 
+        # If we have run hooks, or if the agent has hooks, we need to call them after the LLM call
+        await asyncio.gather(
+            (
+                agent.hooks.on_llm_end(context_wrapper, agent, new_response)
+                if agent.hooks
+                else _coro.noop_coroutine()
+            ),
+            hooks.on_llm_end(context_wrapper, agent, new_response),
+        )
+
         return new_response
 
     @classmethod
@@ -1397,6 +1528,7 @@ class AgentRunner:
 
 
 DEFAULT_AGENT_RUNNER = AgentRunner()
+_TOOL_CALL_TYPES: tuple[type, ...] = get_args(ToolCallItemTypes)
 
 
 def _copy_str_or_list(input: str | list[TResponseInputItem]) -> str | list[TResponseInputItem]:
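
Note: _TOOL_CALL_TYPES uses typing.get_args to flatten the ToolCallItemTypes union alias into a tuple of concrete classes, which is the form isinstance() requires in the streaming loop above. The mechanic on a toy union:

    from typing import Union, get_args

    IntOrStr = Union[int, str]
    TYPES: tuple[type, ...] = get_args(IntOrStr)  # (int, str)

    assert isinstance("hello", TYPES)
    assert not isinstance(3.14, TYPES)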
agents/tool.py CHANGED
@@ -13,6 +13,7 @@ from openai.types.responses.response_computer_tool_call import (
 )
 from openai.types.responses.response_output_item import LocalShellCall, McpApprovalRequest
 from openai.types.responses.tool_param import CodeInterpreter, ImageGeneration, Mcp
+from openai.types.responses.web_search_tool import Filters as WebSearchToolFilters
 from openai.types.responses.web_search_tool_param import UserLocation
 from pydantic import ValidationError
 from typing_extensions import Concatenate, NotRequired, ParamSpec, TypedDict
@@ -133,6 +134,9 @@ class WebSearchTool:
     user_location: UserLocation | None = None
     """Optional location for the search. Lets you customize results to be relevant to a location."""
 
+    filters: WebSearchToolFilters | None = None
+    """A filter to apply based on file attributes."""
+
     search_context_size: Literal["low", "medium", "high"] = "medium"
     """The amount of context to use for the search."""
 
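Note: a hedged sketch of the new field, assuming the openai-python Filters model exposes allowed_domains (the domain list is illustrative):

    from openai.types.responses.web_search_tool import Filters

    from agents import Agent, WebSearchTool  # assumed public imports

    agent = Agent(
        name="Researcher",
        tools=[WebSearchTool(filters=Filters(allowed_domains=["example.com"]))],
    )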
@@ -70,8 +70,8 @@ class BackendSpanExporter(TracingExporter):
         client.
         """
         # Clear the cached property if it exists
-        if 'api_key' in self.__dict__:
-            del self.__dict__['api_key']
+        if "api_key" in self.__dict__:
+            del self.__dict__["api_key"]
 
         # Update the private attribute
         self._api_key = api_key
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.2.9
+Version: 0.2.11
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://openai.github.io/openai-agents-python/
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -21,7 +21,7 @@ Classifier: Typing :: Typed
 Requires-Python: >=3.9
 Requires-Dist: griffe<2,>=1.5.6
 Requires-Dist: mcp<2,>=1.11.0; python_version >= '3.10'
-Requires-Dist: openai<2,>=1.99.6
+Requires-Dist: openai<2,>=1.104.1
 Requires-Dist: pydantic<3,>=2.10
 Requires-Dist: requests<3,>=2.0
 Requires-Dist: types-requests<3,>=2.0