openai-agents 0.2.6__py3-none-any.whl → 0.2.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of openai-agents might be problematic; see the registry's advisory page for more details.

agents/agent.py CHANGED
@@ -223,6 +223,119 @@ class Agent(AgentBase, Generic[TContext]):
223
223
  """Whether to reset the tool choice to the default value after a tool has been called. Defaults
224
224
  to True. This ensures that the agent doesn't enter an infinite loop of tool usage."""
225
225
 
226
+ def __post_init__(self):
227
+ from typing import get_origin
228
+
229
+ if not isinstance(self.name, str):
230
+ raise TypeError(f"Agent name must be a string, got {type(self.name).__name__}")
231
+
232
+ if self.handoff_description is not None and not isinstance(self.handoff_description, str):
233
+ raise TypeError(
234
+ f"Agent handoff_description must be a string or None, "
235
+ f"got {type(self.handoff_description).__name__}"
236
+ )
237
+
238
+ if not isinstance(self.tools, list):
239
+ raise TypeError(f"Agent tools must be a list, got {type(self.tools).__name__}")
240
+
241
+ if not isinstance(self.mcp_servers, list):
242
+ raise TypeError(
243
+ f"Agent mcp_servers must be a list, got {type(self.mcp_servers).__name__}"
244
+ )
245
+
246
+ if not isinstance(self.mcp_config, dict):
247
+ raise TypeError(
248
+ f"Agent mcp_config must be a dict, got {type(self.mcp_config).__name__}"
249
+ )
250
+
251
+ if (
252
+ self.instructions is not None
253
+ and not isinstance(self.instructions, str)
254
+ and not callable(self.instructions)
255
+ ):
256
+ raise TypeError(
257
+ f"Agent instructions must be a string, callable, or None, "
258
+ f"got {type(self.instructions).__name__}"
259
+ )
260
+
261
+ if (
262
+ self.prompt is not None
263
+ and not callable(self.prompt)
264
+ and not hasattr(self.prompt, "get")
265
+ ):
266
+ raise TypeError(
267
+ f"Agent prompt must be a Prompt, DynamicPromptFunction, or None, "
268
+ f"got {type(self.prompt).__name__}"
269
+ )
270
+
271
+ if not isinstance(self.handoffs, list):
272
+ raise TypeError(f"Agent handoffs must be a list, got {type(self.handoffs).__name__}")
273
+
274
+ if self.model is not None and not isinstance(self.model, str):
275
+ from .models.interface import Model
276
+
277
+ if not isinstance(self.model, Model):
278
+ raise TypeError(
279
+ f"Agent model must be a string, Model, or None, got {type(self.model).__name__}"
280
+ )
281
+
282
+ if not isinstance(self.model_settings, ModelSettings):
283
+ raise TypeError(
284
+ f"Agent model_settings must be a ModelSettings instance, "
285
+ f"got {type(self.model_settings).__name__}"
286
+ )
287
+
288
+ if not isinstance(self.input_guardrails, list):
289
+ raise TypeError(
290
+ f"Agent input_guardrails must be a list, got {type(self.input_guardrails).__name__}"
291
+ )
292
+
293
+ if not isinstance(self.output_guardrails, list):
294
+ raise TypeError(
295
+ f"Agent output_guardrails must be a list, "
296
+ f"got {type(self.output_guardrails).__name__}"
297
+ )
298
+
299
+ if self.output_type is not None:
300
+ from .agent_output import AgentOutputSchemaBase
301
+
302
+ if not (
303
+ isinstance(self.output_type, (type, AgentOutputSchemaBase))
304
+ or get_origin(self.output_type) is not None
305
+ ):
306
+ raise TypeError(
307
+ f"Agent output_type must be a type, AgentOutputSchemaBase, or None, "
308
+ f"got {type(self.output_type).__name__}"
309
+ )
310
+
311
+ if self.hooks is not None:
312
+ from .lifecycle import AgentHooksBase
313
+
314
+ if not isinstance(self.hooks, AgentHooksBase):
315
+ raise TypeError(
316
+ f"Agent hooks must be an AgentHooks instance or None, "
317
+ f"got {type(self.hooks).__name__}"
318
+ )
319
+
320
+ if (
321
+ not (
322
+ isinstance(self.tool_use_behavior, str)
323
+ and self.tool_use_behavior in ["run_llm_again", "stop_on_first_tool"]
324
+ )
325
+ and not isinstance(self.tool_use_behavior, dict)
326
+ and not callable(self.tool_use_behavior)
327
+ ):
328
+ raise TypeError(
329
+ f"Agent tool_use_behavior must be 'run_llm_again', 'stop_on_first_tool', "
330
+ f"StopAtTools dict, or callable, got {type(self.tool_use_behavior).__name__}"
331
+ )
332
+
333
+ if not isinstance(self.reset_tool_choice, bool):
334
+ raise TypeError(
335
+ f"Agent reset_tool_choice must be a boolean, "
336
+ f"got {type(self.reset_tool_choice).__name__}"
337
+ )
338
+
226
339
  def clone(self, **kwargs: Any) -> Agent[TContext]:
227
340
  """Make a copy of the agent, with the given arguments changed.
228
341
  Notes:
@@ -280,16 +393,31 @@ class Agent(AgentBase, Generic[TContext]):
280
393
  return run_agent
281
394
 
282
395
  async def get_system_prompt(self, run_context: RunContextWrapper[TContext]) -> str | None:
283
- """Get the system prompt for the agent."""
284
396
  if isinstance(self.instructions, str):
285
397
  return self.instructions
286
398
  elif callable(self.instructions):
399
+ # Inspect the signature of the instructions function
400
+ sig = inspect.signature(self.instructions)
401
+ params = list(sig.parameters.values())
402
+
403
+ # Enforce exactly 2 parameters
404
+ if len(params) != 2:
405
+ raise TypeError(
406
+ f"'instructions' callable must accept exactly 2 arguments (context, agent), "
407
+ f"but got {len(params)}: {[p.name for p in params]}"
408
+ )
409
+
410
+ # Call the instructions function properly
287
411
  if inspect.iscoroutinefunction(self.instructions):
288
412
  return await cast(Awaitable[str], self.instructions(run_context, self))
289
413
  else:
290
414
  return cast(str, self.instructions(run_context, self))
415
+
291
416
  elif self.instructions is not None:
292
- logger.error(f"Instructions must be a string or a function, got {self.instructions}")
417
+ logger.error(
418
+ f"Instructions must be a string or a callable function, "
419
+ f"got {type(self.instructions).__name__}"
420
+ )
293
421
 
294
422
  return None
295
423
 
agents/model_settings.py CHANGED
@@ -102,6 +102,10 @@ class ModelSettings:
102
102
  [reasoning models](https://platform.openai.com/docs/guides/reasoning).
103
103
  """
104
104
 
105
+ verbosity: Literal["low", "medium", "high"] | None = None
106
+ """Constrains the verbosity of the model's response.
107
+ """
108
+
105
109
  metadata: dict[str, str] | None = None
106
110
  """Metadata to include with the model response call."""
107
111
 
@@ -287,6 +287,7 @@ class OpenAIChatCompletionsModel(Model):
287
287
  stream_options=self._non_null_or_not_given(stream_options),
288
288
  store=self._non_null_or_not_given(store),
289
289
  reasoning_effort=self._non_null_or_not_given(reasoning_effort),
290
+ verbosity=self._non_null_or_not_given(model_settings.verbosity),
290
291
  top_logprobs=self._non_null_or_not_given(model_settings.top_logprobs),
291
292
  extra_headers={**HEADERS, **(model_settings.extra_headers or {})},
292
293
  extra_query=model_settings.extra_query,
@@ -270,6 +270,11 @@ class OpenAIResponsesModel(Model):
270
270
  extra_args = dict(model_settings.extra_args or {})
271
271
  if model_settings.top_logprobs is not None:
272
272
  extra_args["top_logprobs"] = model_settings.top_logprobs
273
+ if model_settings.verbosity is not None:
274
+ if response_format != NOT_GIVEN:
275
+ response_format["verbosity"] = model_settings.verbosity # type: ignore [index]
276
+ else:
277
+ response_format = {"verbosity": model_settings.verbosity}
273
278
 
274
279
  return await self._client.responses.create(
275
280
  previous_response_id=self._non_null_or_not_given(previous_response_id),
@@ -98,7 +98,7 @@ class RealtimeSession(RealtimeModelListener):
98
98
  self._stored_exception: Exception | None = None
99
99
 
100
100
  # Guardrails state tracking
101
- self._interrupted_by_guardrail = False
101
+ self._interrupted_response_ids: set[str] = set()
102
102
  self._item_transcripts: dict[str, str] = {} # item_id -> accumulated transcript
103
103
  self._item_guardrail_run_counts: dict[str, int] = {} # item_id -> run count
104
104
  self._debounce_text_length = self._run_config.get("guardrails_settings", {}).get(
@@ -242,7 +242,8 @@ class RealtimeSession(RealtimeModelListener):
242
242
 
243
243
  if current_length >= next_run_threshold:
244
244
  self._item_guardrail_run_counts[item_id] += 1
245
- self._enqueue_guardrail_task(self._item_transcripts[item_id])
245
+ # Pass response_id so we can ensure only a single interrupt per response
246
+ self._enqueue_guardrail_task(self._item_transcripts[item_id], event.response_id)
246
247
  elif event.type == "item_updated":
247
248
  is_new = not any(item.item_id == event.item.item_id for item in self._history)
248
249
  self._history = self._get_new_history(self._history, event.item)
@@ -274,7 +275,6 @@ class RealtimeSession(RealtimeModelListener):
274
275
  # Clear guardrail state for next turn
275
276
  self._item_transcripts.clear()
276
277
  self._item_guardrail_run_counts.clear()
277
- self._interrupted_by_guardrail = False
278
278
 
279
279
  await self._put_event(
280
280
  RealtimeAgentEndEvent(
@@ -442,7 +442,7 @@ class RealtimeSession(RealtimeModelListener):
442
442
  # Otherwise, add it to the end
443
443
  return old_history + [event]
444
444
 
445
- async def _run_output_guardrails(self, text: str) -> bool:
445
+ async def _run_output_guardrails(self, text: str, response_id: str) -> bool:
446
446
  """Run output guardrails on the given text. Returns True if any guardrail was triggered."""
447
447
  combined_guardrails = self._current_agent.output_guardrails + self._run_config.get(
448
448
  "output_guardrails", []
@@ -455,7 +455,8 @@ class RealtimeSession(RealtimeModelListener):
455
455
  output_guardrails.append(guardrail)
456
456
  seen_ids.add(guardrail_id)
457
457
 
458
- if not output_guardrails or self._interrupted_by_guardrail:
458
+ # If we've already interrupted this response, skip
459
+ if not output_guardrails or response_id in self._interrupted_response_ids:
459
460
  return False
460
461
 
461
462
  triggered_results = []
@@ -475,8 +476,12 @@ class RealtimeSession(RealtimeModelListener):
475
476
  continue
476
477
 
477
478
  if triggered_results:
478
- # Mark as interrupted to prevent multiple interrupts
479
- self._interrupted_by_guardrail = True
479
+ # Double-check: bail if already interrupted for this response
480
+ if response_id in self._interrupted_response_ids:
481
+ return False
482
+
483
+ # Mark as interrupted immediately (before any awaits) to minimize race window
484
+ self._interrupted_response_ids.add(response_id)
480
485
 
481
486
  # Emit guardrail tripped event
482
487
  await self._put_event(
@@ -502,10 +507,10 @@ class RealtimeSession(RealtimeModelListener):
502
507
 
503
508
  return False
504
509
 
505
- def _enqueue_guardrail_task(self, text: str) -> None:
510
+ def _enqueue_guardrail_task(self, text: str, response_id: str) -> None:
506
511
  # Runs the guardrails in a separate task to avoid blocking the main loop
507
512
 
508
- task = asyncio.create_task(self._run_output_guardrails(text))
513
+ task = asyncio.create_task(self._run_output_guardrails(text, response_id))
509
514
  self._guardrail_tasks.add(task)
510
515
 
511
516
  # Add callback to remove completed tasks and handle exceptions
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: openai-agents
3
- Version: 0.2.6
3
+ Version: 0.2.7
4
4
  Summary: OpenAI Agents SDK
5
5
  Project-URL: Homepage, https://openai.github.io/openai-agents-python/
6
6
  Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -2,7 +2,7 @@ agents/__init__.py,sha256=YXcfllpLrUjafU_5KwIZvVEdUzcjZYhatqCS5tb03UQ,7908
2
2
  agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
3
3
  agents/_debug.py,sha256=7OKys2lDjeCtGggTkM53m_8vw0WIr3yt-_JPBDAnsw0,608
4
4
  agents/_run_impl.py,sha256=8Bc8YIHzv8Qf40tUAcHV5qqUkGSUxSraNkV0Y5xLFFQ,44894
5
- agents/agent.py,sha256=zBhC_bL5WuAmXAHJTj_ZgN5Nxj8jq8vZspdX8B0do38,12648
5
+ agents/agent.py,sha256=jn_nV38eVLK3QYh7dUmKO1AocQOCCPaHEERaSVt0l8g,17574
6
6
  agents/agent_output.py,sha256=teTFK8unUN3esXhmEBO0bQGYQm1Axd5rYleDt9TFDgw,7153
7
7
  agents/computer.py,sha256=XD44UgiUWSfniv-xKwwDP6wFKVwBiZkpaL1hO-0-7ZA,2516
8
8
  agents/exceptions.py,sha256=NHMdHE0cZ6AdA6UgUylTzVHAX05Ol1CkO814a0FdZcs,2862
@@ -12,7 +12,7 @@ agents/handoffs.py,sha256=31-rQ-iMWlWNd93ivgTTSMGkqlariXrNfWI_udMWt7s,11409
12
12
  agents/items.py,sha256=ntrJ-HuqSMC8HtIwS9pcqHYXtiQ2TJB6lHR-bcvNn4c,9848
13
13
  agents/lifecycle.py,sha256=sJwESHBHbml7rSYH360-P6x1bLyENcQWm4bT4rQcbuo,3129
14
14
  agents/logger.py,sha256=p_ef7vWKpBev5FFybPJjhrCCQizK08Yy1A2EDO1SNNg,60
15
- agents/model_settings.py,sha256=SKCrfV5A7u0zaY8fh2PZRe08W5sEhArHC3YHpEfeip0,6357
15
+ agents/model_settings.py,sha256=7zGEGxfXtRHlst9qYngYJc5mkr2l_mi5YuQDGiQ-qXM,6485
16
16
  agents/prompts.py,sha256=Ss5y_7s2HFcRAOAKu4WTxQszs5ybI8TfbxgEYdnj9sg,2231
17
17
  agents/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
18
18
  agents/repl.py,sha256=FKZlkGfw6QxItTkjFkCAQwXuV_pn69DIamGd3PiKQFk,2361
@@ -45,9 +45,9 @@ agents/models/chatcmpl_stream_handler.py,sha256=XUoMnNEcSqK6IRMI6GPH8CwMCXi6Nhbf
45
45
  agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
46
46
  agents/models/interface.py,sha256=TpY_GEk3LLMozCcYAEcC-Y_VRpI3pwE7A7ZM317mk7M,3839
47
47
  agents/models/multi_provider.py,sha256=aiDbls5G4YomPfN6qH1pGlj41WS5jlDp2T82zm6qcnM,5578
48
- agents/models/openai_chatcompletions.py,sha256=N_8U_rKRhB1pgMju29bOok1QFWF_EL7JoatlKzy7hLY,13102
48
+ agents/models/openai_chatcompletions.py,sha256=lJJZCdWiZ0jTUp77OD1Zs6tSLZ7k8v1j_D2gB2Nw12Y,13179
49
49
  agents/models/openai_provider.py,sha256=NMxTNaoTa329GrA7jj51LC02pb_e2eFh-PCvWADJrkY,3478
50
- agents/models/openai_responses.py,sha256=pBAHIwz_kq561bmzqMwz6L4dFd_R4V5C7R21xLBM__o,17048
50
+ agents/models/openai_responses.py,sha256=BnlN9hH6J4LKWBuM0lDfhvRgAb8IjQJuk5Hfd3OJ8G0,17330
51
51
  agents/realtime/README.md,sha256=5YCYXH5ULmlWoWo1PE9TlbHjeYgjnp-xY8ZssSFY2Vk,126
52
52
  agents/realtime/__init__.py,sha256=7qvzK8QJuHRnPHxDgDj21v8-lnSN4Uurg9znwJv_Tqg,4923
53
53
  agents/realtime/_default_tracker.py,sha256=4OMxBvD1MnZmMn6JZYKL42uWhVzvK6NdDLDfPP54d78,1765
@@ -62,7 +62,7 @@ agents/realtime/model_events.py,sha256=X7UrUU_g4u5gWaf2mUesJJ-Ik1Z1QE0Z-ZP7kDmX1
62
62
  agents/realtime/model_inputs.py,sha256=OW2bn3wD5_pXLunDUf35jhG2q_bTKbC_D7Qu-83aOEA,2243
63
63
  agents/realtime/openai_realtime.py,sha256=vgzgklFcRpB9ZfsDda7DtXlBn3NF6bZdysta1DwQhrM,30120
64
64
  agents/realtime/runner.py,sha256=KfU7utmc9QFH2htIKN2IN9H-5EnB0qN9ezmvlRTnOm4,2511
65
- agents/realtime/session.py,sha256=yMHFhqhBKDHzlK-k6JTuqXKggMPW3dPt-aavqDoKsec,23375
65
+ agents/realtime/session.py,sha256=EmbjWBoIw-1RAPICZbWtQ5OUaZh14xPXPwjHWXDU8c4,23766
66
66
  agents/tracing/__init__.py,sha256=5HO_6na5S6EwICgwl50OMtxiIIosUrqalhvldlYvSVc,2991
67
67
  agents/tracing/create.py,sha256=xpJ4ZRnGyUDPKoVVkA_8hmdhtwOKGhSkwRco2AQIhAo,18003
68
68
  agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
@@ -97,7 +97,7 @@ agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
97
97
  agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
98
98
  agents/voice/models/openai_stt.py,sha256=LcVDS7f1pmbm--PWX-IaV9uLg9uv5_L3vSCbVnTJeGs,16864
99
99
  agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
100
- openai_agents-0.2.6.dist-info/METADATA,sha256=E_Fnl2A-qaNEFT07zAH1lx7zIj-XVh7Wli5P5NhfjR0,12104
101
- openai_agents-0.2.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
102
- openai_agents-0.2.6.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
103
- openai_agents-0.2.6.dist-info/RECORD,,
100
+ openai_agents-0.2.7.dist-info/METADATA,sha256=AusANdnHsmV0VjQRDtmRQ3j5Ql8oT4rUKaqgZiR0Hzg,12104
101
+ openai_agents-0.2.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
102
+ openai_agents-0.2.7.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
103
+ openai_agents-0.2.7.dist-info/RECORD,,