openai-agents 0.2.5__py3-none-any.whl → 0.2.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

agents/agent.py CHANGED
@@ -223,6 +223,119 @@ class Agent(AgentBase, Generic[TContext]):
     """Whether to reset the tool choice to the default value after a tool has been called. Defaults
     to True. This ensures that the agent doesn't enter an infinite loop of tool usage."""
 
+    def __post_init__(self):
+        from typing import get_origin
+
+        if not isinstance(self.name, str):
+            raise TypeError(f"Agent name must be a string, got {type(self.name).__name__}")
+
+        if self.handoff_description is not None and not isinstance(self.handoff_description, str):
+            raise TypeError(
+                f"Agent handoff_description must be a string or None, "
+                f"got {type(self.handoff_description).__name__}"
+            )
+
+        if not isinstance(self.tools, list):
+            raise TypeError(f"Agent tools must be a list, got {type(self.tools).__name__}")
+
+        if not isinstance(self.mcp_servers, list):
+            raise TypeError(
+                f"Agent mcp_servers must be a list, got {type(self.mcp_servers).__name__}"
+            )
+
+        if not isinstance(self.mcp_config, dict):
+            raise TypeError(
+                f"Agent mcp_config must be a dict, got {type(self.mcp_config).__name__}"
+            )
+
+        if (
+            self.instructions is not None
+            and not isinstance(self.instructions, str)
+            and not callable(self.instructions)
+        ):
+            raise TypeError(
+                f"Agent instructions must be a string, callable, or None, "
+                f"got {type(self.instructions).__name__}"
+            )
+
+        if (
+            self.prompt is not None
+            and not callable(self.prompt)
+            and not hasattr(self.prompt, "get")
+        ):
+            raise TypeError(
+                f"Agent prompt must be a Prompt, DynamicPromptFunction, or None, "
+                f"got {type(self.prompt).__name__}"
+            )
+
+        if not isinstance(self.handoffs, list):
+            raise TypeError(f"Agent handoffs must be a list, got {type(self.handoffs).__name__}")
+
+        if self.model is not None and not isinstance(self.model, str):
+            from .models.interface import Model
+
+            if not isinstance(self.model, Model):
+                raise TypeError(
+                    f"Agent model must be a string, Model, or None, got {type(self.model).__name__}"
+                )
+
+        if not isinstance(self.model_settings, ModelSettings):
+            raise TypeError(
+                f"Agent model_settings must be a ModelSettings instance, "
+                f"got {type(self.model_settings).__name__}"
+            )
+
+        if not isinstance(self.input_guardrails, list):
+            raise TypeError(
+                f"Agent input_guardrails must be a list, got {type(self.input_guardrails).__name__}"
+            )
+
+        if not isinstance(self.output_guardrails, list):
+            raise TypeError(
+                f"Agent output_guardrails must be a list, "
+                f"got {type(self.output_guardrails).__name__}"
+            )
+
+        if self.output_type is not None:
+            from .agent_output import AgentOutputSchemaBase
+
+            if not (
+                isinstance(self.output_type, (type, AgentOutputSchemaBase))
+                or get_origin(self.output_type) is not None
+            ):
+                raise TypeError(
+                    f"Agent output_type must be a type, AgentOutputSchemaBase, or None, "
+                    f"got {type(self.output_type).__name__}"
+                )
+
+        if self.hooks is not None:
+            from .lifecycle import AgentHooksBase
+
+            if not isinstance(self.hooks, AgentHooksBase):
+                raise TypeError(
+                    f"Agent hooks must be an AgentHooks instance or None, "
+                    f"got {type(self.hooks).__name__}"
+                )
+
+        if (
+            not (
+                isinstance(self.tool_use_behavior, str)
+                and self.tool_use_behavior in ["run_llm_again", "stop_on_first_tool"]
+            )
+            and not isinstance(self.tool_use_behavior, dict)
+            and not callable(self.tool_use_behavior)
+        ):
+            raise TypeError(
+                f"Agent tool_use_behavior must be 'run_llm_again', 'stop_on_first_tool', "
+                f"StopAtTools dict, or callable, got {type(self.tool_use_behavior).__name__}"
+            )
+
+        if not isinstance(self.reset_tool_choice, bool):
+            raise TypeError(
+                f"Agent reset_tool_choice must be a boolean, "
+                f"got {type(self.reset_tool_choice).__name__}"
+            )
+
     def clone(self, **kwargs: Any) -> Agent[TContext]:
         """Make a copy of the agent, with the given arguments changed.
         Notes:
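
In practice, the new `__post_init__` means mistyped `Agent` fields now fail at construction time instead of surfacing mid-run. A minimal sketch of the behavior (assuming the top-level `agents` package re-exports `Agent`, as in prior releases):

    from agents import Agent

    # Passes validation: name is a str, all other fields keep typed defaults
    agent = Agent(name="support-bot", instructions="Be terse.")

    try:
        Agent(name=123)  # type: ignore[arg-type]
    except TypeError as exc:
        print(exc)  # -> Agent name must be a string, got int
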
@@ -280,16 +393,31 @@ class Agent(AgentBase, Generic[TContext]):
         return run_agent
 
     async def get_system_prompt(self, run_context: RunContextWrapper[TContext]) -> str | None:
-        """Get the system prompt for the agent."""
         if isinstance(self.instructions, str):
             return self.instructions
         elif callable(self.instructions):
+            # Inspect the signature of the instructions function
+            sig = inspect.signature(self.instructions)
+            params = list(sig.parameters.values())
+
+            # Enforce exactly 2 parameters
+            if len(params) != 2:
+                raise TypeError(
+                    f"'instructions' callable must accept exactly 2 arguments (context, agent), "
+                    f"but got {len(params)}: {[p.name for p in params]}"
+                )
+
+            # Call the instructions function properly
             if inspect.iscoroutinefunction(self.instructions):
                 return await cast(Awaitable[str], self.instructions(run_context, self))
             else:
                 return cast(str, self.instructions(run_context, self))
+
         elif self.instructions is not None:
-            logger.error(f"Instructions must be a string or a function, got {self.instructions}")
+            logger.error(
+                f"Instructions must be a string or a callable function, "
+                f"got {type(self.instructions).__name__}"
+            )
 
         return None
 
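The stricter check means a dynamic `instructions` callable must take exactly two parameters, `(context, agent)`; a callable with the wrong arity now raises a descriptive TypeError instead of an opaque call error. A hedged sketch:

    from agents import Agent, RunContextWrapper

    def dynamic_instructions(context: RunContextWrapper, agent: Agent) -> str:
        # Exactly two parameters, as get_system_prompt now enforces
        return f"You are {agent.name}. Keep answers short."

    agent = Agent(name="helper", instructions=dynamic_instructions)

    # Agent(name="bad", instructions=lambda: "hi") would raise TypeError when
    # the prompt is resolved: the callable accepts 0 arguments, not 2.
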
agents/extensions/models/litellm_model.py CHANGED
@@ -18,13 +18,17 @@ except ImportError as _e:
     ) from _e
 
 from openai import NOT_GIVEN, AsyncStream, NotGiven
-from openai.types.chat import ChatCompletionChunk, ChatCompletionMessageToolCall
+from openai.types.chat import (
+    ChatCompletionChunk,
+    ChatCompletionMessageFunctionToolCall,
+)
 from openai.types.chat.chat_completion_message import (
     Annotation,
     AnnotationURLCitation,
     ChatCompletionMessage,
 )
-from openai.types.chat.chat_completion_message_tool_call import Function
+from openai.types.chat.chat_completion_message_function_tool_call import Function
+from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall
 from openai.types.responses import Response
 
 from ... import _debug
@@ -321,6 +325,7 @@ class LitellmModel(Model):
             stream=stream,
             stream_options=stream_options,
             reasoning_effort=reasoning_effort,
+            top_logprobs=model_settings.top_logprobs,
             extra_headers={**HEADERS, **(model_settings.extra_headers or {})},
             api_key=self.api_key,
             base_url=self.base_url,
@@ -361,7 +366,7 @@ class LitellmConverter:
         if message.role != "assistant":
             raise ModelBehaviorError(f"Unsupported role: {message.role}")
 
-        tool_calls = (
+        tool_calls: list[ChatCompletionMessageToolCall] | None = (
             [LitellmConverter.convert_tool_call_to_openai(tool) for tool in message.tool_calls]
             if message.tool_calls
             else None
@@ -412,11 +417,12 @@ class LitellmConverter:
     @classmethod
     def convert_tool_call_to_openai(
        cls, tool_call: litellm.types.utils.ChatCompletionMessageToolCall
-    ) -> ChatCompletionMessageToolCall:
-        return ChatCompletionMessageToolCall(
+    ) -> ChatCompletionMessageFunctionToolCall:
+        return ChatCompletionMessageFunctionToolCall(
             id=tool_call.id,
             type="function",
             function=Function(
-                name=tool_call.function.name or "", arguments=tool_call.function.arguments
+                name=tool_call.function.name or "",
+                arguments=tool_call.function.arguments,
             ),
         )
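
Two things change here: tool calls converted from LiteLLM are now built as the function-specific `ChatCompletionMessageFunctionToolCall` type introduced in newer `openai` releases, and `top_logprobs` is forwarded to `litellm.acompletion`. A hedged usage sketch; the model name and key below are placeholders:

    from agents import Agent, ModelSettings
    from agents.extensions.models.litellm_model import LitellmModel

    agent = Agent(
        name="probe",
        model=LitellmModel(model="anthropic/claude-3-5-sonnet-20240620", api_key="sk-..."),
        model_settings=ModelSettings(top_logprobs=3),  # now reaches litellm.acompletion
    )
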
agents/lifecycle.py CHANGED
@@ -42,7 +42,7 @@ class RunHooksBase(Generic[TContext, TAgent]):
         agent: TAgent,
         tool: Tool,
     ) -> None:
-        """Called before a tool is invoked."""
+        """Called concurrently with tool invocation."""
         pass
 
     async def on_tool_end(
@@ -93,7 +93,7 @@ class AgentHooksBase(Generic[TContext, TAgent]):
         agent: TAgent,
         tool: Tool,
     ) -> None:
-        """Called before a tool is invoked."""
+        """Called concurrently with tool invocation."""
        pass
 
     async def on_tool_end(
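
The docstring change is worth internalizing: `on_tool_start` is no longer guaranteed to finish before the tool runs, so hooks should observe rather than set up state the tool depends on. A sketch of a hook that is safe under the concurrent semantics (assuming the `AgentHooks` subclassing pattern from earlier releases):

    from typing import Any

    from agents import Agent, AgentHooks, RunContextWrapper, Tool

    class LoggingHooks(AgentHooks[Any]):
        async def on_tool_start(
            self, context: RunContextWrapper[Any], agent: Agent[Any], tool: Tool
        ) -> None:
            # Runs concurrently with the tool itself: log, don't prepare inputs.
            print(f"{agent.name} started tool: {tool.name}")

    agent = Agent(name="worker", hooks=LoggingHooks())
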
agents/model_settings.py CHANGED
@@ -55,6 +55,7 @@ Headers: TypeAlias = Mapping[str, Union[str, Omit]]
 ToolChoice: TypeAlias = Union[Literal["auto", "required", "none"], str, MCPToolChoice, None]
 
 
+
 @dataclass
 class ModelSettings:
     """Settings to use when calling an LLM.
@@ -101,21 +102,30 @@ class ModelSettings:
     [reasoning models](https://platform.openai.com/docs/guides/reasoning).
     """
 
+    verbosity: Literal["low", "medium", "high"] | None = None
+    """Constrains the verbosity of the model's response.
+    """
+
     metadata: dict[str, str] | None = None
     """Metadata to include with the model response call."""
 
     store: bool | None = None
     """Whether to store the generated model response for later retrieval.
-    Defaults to True if not provided."""
+    For Responses API: automatically enabled when not specified.
+    For Chat Completions API: disabled when not specified."""
 
     include_usage: bool | None = None
     """Whether to include usage chunk.
-    Defaults to True if not provided."""
+    Only available for Chat Completions API."""
 
     response_include: list[ResponseIncludable] | None = None
     """Additional output data to include in the model response.
     [include parameter](https://platform.openai.com/docs/api-reference/responses/create#responses-create-include)"""
 
+    top_logprobs: int | None = None
+    """Number of top tokens to return logprobs for. Setting this will
+    automatically include ``"message.output_text.logprobs"`` in the response."""
+
     extra_query: Query | None = None
     """Additional query fields to provide with the request.
     Defaults to None if not provided."""
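
Taken together, the two new fields are set like any other `ModelSettings` knob: `verbosity` targets models that accept a text-verbosity control, and `top_logprobs` implies the logprobs include on the Responses API (see the `openai_responses.py` hunks below). A hedged sketch; the model name is a placeholder:

    from agents import Agent, ModelSettings

    agent = Agent(
        name="concise",
        model="gpt-5",
        model_settings=ModelSettings(verbosity="low", top_logprobs=2),
    )
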
agents/models/chatcmpl_converter.py CHANGED
@@ -12,8 +12,8 @@ from openai.types.chat import (
     ChatCompletionContentPartTextParam,
     ChatCompletionDeveloperMessageParam,
     ChatCompletionMessage,
+    ChatCompletionMessageFunctionToolCallParam,
     ChatCompletionMessageParam,
-    ChatCompletionMessageToolCallParam,
     ChatCompletionSystemMessageParam,
     ChatCompletionToolChoiceOptionParam,
     ChatCompletionToolMessageParam,
@@ -126,15 +126,18 @@ class Converter:
 
         if message.tool_calls:
             for tool_call in message.tool_calls:
-                items.append(
-                    ResponseFunctionToolCall(
-                        id=FAKE_RESPONSES_ID,
-                        call_id=tool_call.id,
-                        arguments=tool_call.function.arguments,
-                        name=tool_call.function.name,
-                        type="function_call",
+                if tool_call.type == "function":
+                    items.append(
+                        ResponseFunctionToolCall(
+                            id=FAKE_RESPONSES_ID,
+                            call_id=tool_call.id,
+                            arguments=tool_call.function.arguments,
+                            name=tool_call.function.name,
+                            type="function_call",
+                        )
                     )
-                )
+                elif tool_call.type == "custom":
+                    pass
 
         return items
 
@@ -420,7 +423,7 @@ class Converter:
         elif file_search := cls.maybe_file_search_call(item):
             asst = ensure_assistant_message()
             tool_calls = list(asst.get("tool_calls", []))
-            new_tool_call = ChatCompletionMessageToolCallParam(
+            new_tool_call = ChatCompletionMessageFunctionToolCallParam(
                 id=file_search["id"],
                 type="function",
                 function={
@@ -440,7 +443,7 @@ class Converter:
             asst = ensure_assistant_message()
             tool_calls = list(asst.get("tool_calls", []))
             arguments = func_call["arguments"] if func_call["arguments"] else "{}"
-            new_tool_call = ChatCompletionMessageToolCallParam(
+            new_tool_call = ChatCompletionMessageFunctionToolCallParam(
                 id=func_call["call_id"],
                 type="function",
                 function={

agents/models/openai_chatcompletions.py CHANGED
@@ -287,6 +287,8 @@ class OpenAIChatCompletionsModel(Model):
             stream_options=self._non_null_or_not_given(stream_options),
             store=self._non_null_or_not_given(store),
             reasoning_effort=self._non_null_or_not_given(reasoning_effort),
+            verbosity=self._non_null_or_not_given(model_settings.verbosity),
+            top_logprobs=self._non_null_or_not_given(model_settings.top_logprobs),
             extra_headers={**HEADERS, **(model_settings.extra_headers or {})},
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,

agents/models/openai_responses.py CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations
 import json
 from collections.abc import AsyncIterator
 from dataclasses import dataclass
-from typing import TYPE_CHECKING, Any, Literal, overload
+from typing import TYPE_CHECKING, Any, Literal, cast, overload
 
 from openai import NOT_GIVEN, APIStatusError, AsyncOpenAI, AsyncStream, NotGiven
 from openai.types import ChatModel
@@ -247,9 +247,12 @@ class OpenAIResponsesModel(Model):
         converted_tools = Converter.convert_tools(tools, handoffs)
         response_format = Converter.get_response_format(output_schema)
 
-        include: list[ResponseIncludable] = converted_tools.includes
+        include_set: set[str] = set(converted_tools.includes)
         if model_settings.response_include is not None:
-            include = list({*include, *model_settings.response_include})
+            include_set.update(model_settings.response_include)
+        if model_settings.top_logprobs is not None:
+            include_set.add("message.output_text.logprobs")
+        include = cast(list[ResponseIncludable], list(include_set))
 
         if _debug.DONT_LOG_MODEL_DATA:
             logger.debug("Calling LLM")
@@ -264,6 +267,15 @@ class OpenAIResponsesModel(Model):
                 f"Previous response id: {previous_response_id}\n"
             )
 
+        extra_args = dict(model_settings.extra_args or {})
+        if model_settings.top_logprobs is not None:
+            extra_args["top_logprobs"] = model_settings.top_logprobs
+        if model_settings.verbosity is not None:
+            if response_format != NOT_GIVEN:
+                response_format["verbosity"] = model_settings.verbosity  # type: ignore [index]
+            else:
+                response_format = {"verbosity": model_settings.verbosity}
+
         return await self._client.responses.create(
             previous_response_id=self._non_null_or_not_given(previous_response_id),
             instructions=self._non_null_or_not_given(system_instructions),
@@ -286,7 +298,7 @@ class OpenAIResponsesModel(Model):
             store=self._non_null_or_not_given(model_settings.store),
             reasoning=self._non_null_or_not_given(model_settings.reasoning),
             metadata=self._non_null_or_not_given(model_settings.metadata),
-            **(model_settings.extra_args or {}),
+            **extra_args,
         )
 
     def _get_client(self) -> AsyncOpenAI:
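
The include handling above can be read in isolation: converted-tool includes, user-specified includes, and the automatic logprobs include are merged through a set so duplicates collapse. A standalone sketch of the same logic, with made-up include values:

    converted_includes = ["file_search_call.results"]
    user_includes = ["file_search_call.results", "message.input_image.image_url"]
    top_logprobs = 2

    include_set: set[str] = set(converted_includes)
    include_set.update(user_includes)
    if top_logprobs is not None:
        include_set.add("message.output_text.logprobs")

    print(sorted(include_set))
    # ['file_search_call.results', 'message.input_image.image_url',
    #  'message.output_text.logprobs']
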
agents/realtime/agent.py CHANGED
@@ -7,6 +7,7 @@ from dataclasses import dataclass, field
 from typing import Any, Callable, Generic, cast
 
 from ..agent import AgentBase
+from ..guardrail import OutputGuardrail
 from ..handoffs import Handoff
 from ..lifecycle import AgentHooksBase, RunHooksBase
 from ..logger import logger
@@ -62,6 +63,11 @@ class RealtimeAgent(AgentBase, Generic[TContext]):
     modularity.
     """
 
+    output_guardrails: list[OutputGuardrail[TContext]] = field(default_factory=list)
+    """A list of checks that run on the final output of the agent, after generating a response.
+    Runs only if the agent produces a final output.
+    """
+
     hooks: RealtimeAgentHooks | None = None
     """A class that receives callbacks on various lifecycle events for this agent.
     """
agents/realtime/session.py CHANGED
@@ -98,7 +98,7 @@ class RealtimeSession(RealtimeModelListener):
         self._stored_exception: Exception | None = None
 
         # Guardrails state tracking
-        self._interrupted_by_guardrail = False
+        self._interrupted_response_ids: set[str] = set()
         self._item_transcripts: dict[str, str] = {}  # item_id -> accumulated transcript
         self._item_guardrail_run_counts: dict[str, int] = {}  # item_id -> run count
         self._debounce_text_length = self._run_config.get("guardrails_settings", {}).get(
@@ -242,7 +242,8 @@ class RealtimeSession(RealtimeModelListener):
 
             if current_length >= next_run_threshold:
                 self._item_guardrail_run_counts[item_id] += 1
-                self._enqueue_guardrail_task(self._item_transcripts[item_id])
+                # Pass response_id so we can ensure only a single interrupt per response
+                self._enqueue_guardrail_task(self._item_transcripts[item_id], event.response_id)
         elif event.type == "item_updated":
             is_new = not any(item.item_id == event.item.item_id for item in self._history)
             self._history = self._get_new_history(self._history, event.item)
@@ -274,7 +275,6 @@ class RealtimeSession(RealtimeModelListener):
             # Clear guardrail state for next turn
             self._item_transcripts.clear()
             self._item_guardrail_run_counts.clear()
-            self._interrupted_by_guardrail = False
 
             await self._put_event(
                 RealtimeAgentEndEvent(
@@ -442,10 +442,21 @@ class RealtimeSession(RealtimeModelListener):
         # Otherwise, add it to the end
         return old_history + [event]
 
-    async def _run_output_guardrails(self, text: str) -> bool:
+    async def _run_output_guardrails(self, text: str, response_id: str) -> bool:
         """Run output guardrails on the given text. Returns True if any guardrail was triggered."""
-        output_guardrails = self._run_config.get("output_guardrails", [])
-        if not output_guardrails or self._interrupted_by_guardrail:
+        combined_guardrails = self._current_agent.output_guardrails + self._run_config.get(
+            "output_guardrails", []
+        )
+        seen_ids: set[int] = set()
+        output_guardrails = []
+        for guardrail in combined_guardrails:
+            guardrail_id = id(guardrail)
+            if guardrail_id not in seen_ids:
+                output_guardrails.append(guardrail)
+                seen_ids.add(guardrail_id)
+
+        # If we've already interrupted this response, skip
+        if not output_guardrails or response_id in self._interrupted_response_ids:
             return False
 
         triggered_results = []
@@ -465,8 +476,12 @@ class RealtimeSession(RealtimeModelListener):
                 continue
 
         if triggered_results:
-            # Mark as interrupted to prevent multiple interrupts
-            self._interrupted_by_guardrail = True
+            # Double-check: bail if already interrupted for this response
+            if response_id in self._interrupted_response_ids:
+                return False
+
+            # Mark as interrupted immediately (before any awaits) to minimize race window
+            self._interrupted_response_ids.add(response_id)
 
             # Emit guardrail tripped event
             await self._put_event(
@@ -492,10 +507,10 @@ class RealtimeSession(RealtimeModelListener):
 
         return False
 
-    def _enqueue_guardrail_task(self, text: str) -> None:
+    def _enqueue_guardrail_task(self, text: str, response_id: str) -> None:
         # Runs the guardrails in a separate task to avoid blocking the main loop
 
-        task = asyncio.create_task(self._run_output_guardrails(text))
+        task = asyncio.create_task(self._run_output_guardrails(text, response_id))
         self._guardrail_tasks.add(task)
 
         # Add callback to remove completed tasks and handle exceptions
openai_agents-0.2.5.dist-info/METADATA → openai_agents-0.2.7.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.2.5
+Version: 0.2.7
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://openai.github.io/openai-agents-python/
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -21,7 +21,7 @@ Classifier: Typing :: Typed
 Requires-Python: >=3.9
 Requires-Dist: griffe<2,>=1.5.6
 Requires-Dist: mcp<2,>=1.11.0; python_version >= '3.10'
-Requires-Dist: openai<2,>=1.97.1
+Requires-Dist: openai<2,>=1.99.6
 Requires-Dist: pydantic<3,>=2.10
 Requires-Dist: requests<3,>=2.0
 Requires-Dist: types-requests<3,>=2.0

openai_agents-0.2.5.dist-info/RECORD → openai_agents-0.2.7.dist-info/RECORD CHANGED
@@ -2,7 +2,7 @@ agents/__init__.py,sha256=YXcfllpLrUjafU_5KwIZvVEdUzcjZYhatqCS5tb03UQ,7908
 agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
 agents/_debug.py,sha256=7OKys2lDjeCtGggTkM53m_8vw0WIr3yt-_JPBDAnsw0,608
 agents/_run_impl.py,sha256=8Bc8YIHzv8Qf40tUAcHV5qqUkGSUxSraNkV0Y5xLFFQ,44894
-agents/agent.py,sha256=zBhC_bL5WuAmXAHJTj_ZgN5Nxj8jq8vZspdX8B0do38,12648
+agents/agent.py,sha256=jn_nV38eVLK3QYh7dUmKO1AocQOCCPaHEERaSVt0l8g,17574
 agents/agent_output.py,sha256=teTFK8unUN3esXhmEBO0bQGYQm1Axd5rYleDt9TFDgw,7153
 agents/computer.py,sha256=XD44UgiUWSfniv-xKwwDP6wFKVwBiZkpaL1hO-0-7ZA,2516
 agents/exceptions.py,sha256=NHMdHE0cZ6AdA6UgUylTzVHAX05Ol1CkO814a0FdZcs,2862
@@ -10,9 +10,9 @@ agents/function_schema.py,sha256=yZ3PEOmfy836Me_W4QlItMeFq2j4BtpuI2FmQswbIcQ,135
 agents/guardrail.py,sha256=7P-kd9rKPhgB8rtI31MCV5ho4ZrEaNCQxHvE8IK3EOk,9582
 agents/handoffs.py,sha256=31-rQ-iMWlWNd93ivgTTSMGkqlariXrNfWI_udMWt7s,11409
 agents/items.py,sha256=ntrJ-HuqSMC8HtIwS9pcqHYXtiQ2TJB6lHR-bcvNn4c,9848
-agents/lifecycle.py,sha256=C1LSoCa_0zf0nt7yI3SKL5bAAG4Cso6--Gmk8S8zpJg,3111
+agents/lifecycle.py,sha256=sJwESHBHbml7rSYH360-P6x1bLyENcQWm4bT4rQcbuo,3129
 agents/logger.py,sha256=p_ef7vWKpBev5FFybPJjhrCCQizK08Yy1A2EDO1SNNg,60
-agents/model_settings.py,sha256=uWYuQJDzQmXTBxt79fsIhgfxvf2rEiY09m9dDgk-yBk,6075
+agents/model_settings.py,sha256=7zGEGxfXtRHlst9qYngYJc5mkr2l_mi5YuQDGiQ-qXM,6485
 agents/prompts.py,sha256=Ss5y_7s2HFcRAOAKu4WTxQszs5ybI8TfbxgEYdnj9sg,2231
 agents/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 agents/repl.py,sha256=FKZlkGfw6QxItTkjFkCAQwXuV_pn69DIamGd3PiKQFk,2361
@@ -30,7 +30,7 @@ agents/extensions/handoff_filters.py,sha256=Bzkjb1SmIHoibgO26oesNO2Qdx2avfDGkHrS
 agents/extensions/handoff_prompt.py,sha256=oGWN0uNh3Z1L7E-Ev2up8W084fFrDNOsLDy7P6bcmic,1006
 agents/extensions/visualization.py,sha256=sf9D_C-HMwkbWdZccTZvvMPRy_NSiwbm48tRJlESQBI,5144
 agents/extensions/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-agents/extensions/models/litellm_model.py,sha256=TWd57pzGJGpyvrBstqiFsPHlUFnExw1muchGGBA2jJc,15437
+agents/extensions/models/litellm_model.py,sha256=4m6MVYaa-pJzXuBNRZGv0vw2R73R32B0EAZ1kXanVVw,15692
 agents/extensions/models/litellm_provider.py,sha256=wTm00Anq8YoNb9AnyT0JOunDG-HCDm_98ORNy7aNJdw,928
 agents/mcp/__init__.py,sha256=yHmmYlrmEHzUas1inRLKL2iPqbb_-107G3gKe_tyg4I,750
 agents/mcp/server.py,sha256=mTXQL4om5oA2fYevk63SUlwDri-RcUleUH_4hFrA0QM,24266
@@ -39,20 +39,20 @@ agents/memory/__init__.py,sha256=bo2Rb3PqwSCo9PhBVVJOjvjMM1TfytuDPAFEDADYwwA,84
 agents/memory/session.py,sha256=9RQ1I7qGh_9DzsyUd9srSPrxRBlw7jks-67NxYqKvvs,13060
 agents/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agents/models/_openai_shared.py,sha256=4Ngwo2Fv2RXY61Pqck1cYPkSln2tDnb8Ai-ao4QG-iE,836
-agents/models/chatcmpl_converter.py,sha256=m05aOXzO9y23qO3u2-7pHWZ7rdIWQZfckI2KACdIOUY,19829
+agents/models/chatcmpl_converter.py,sha256=fdA-4_O7GabTCFZJOrtI6TdxFvjS4Bn4vf2RwVC9yNA,20012
 agents/models/chatcmpl_helpers.py,sha256=eIWySobaH7I0AQijAz5i-_rtsXrSvmEHD567s_8Zw1o,1318
 agents/models/chatcmpl_stream_handler.py,sha256=XUoMnNEcSqK6IRMI6GPH8CwMCXi6NhbfHfpCY3SXJOM,24124
 agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
 agents/models/interface.py,sha256=TpY_GEk3LLMozCcYAEcC-Y_VRpI3pwE7A7ZM317mk7M,3839
 agents/models/multi_provider.py,sha256=aiDbls5G4YomPfN6qH1pGlj41WS5jlDp2T82zm6qcnM,5578
-agents/models/openai_chatcompletions.py,sha256=erilKVPq6Gh6EukaqXbLImrhMwj75rdQJPt0Nz1UIi8,13019
+agents/models/openai_chatcompletions.py,sha256=lJJZCdWiZ0jTUp77OD1Zs6tSLZ7k8v1j_D2gB2Nw12Y,13179
 agents/models/openai_provider.py,sha256=NMxTNaoTa329GrA7jj51LC02pb_e2eFh-PCvWADJrkY,3478
-agents/models/openai_responses.py,sha256=IaZ419gGkx8cWDZxi_2djvAor3RoUUiAdid782WOyv0,16720
+agents/models/openai_responses.py,sha256=BnlN9hH6J4LKWBuM0lDfhvRgAb8IjQJuk5Hfd3OJ8G0,17330
 agents/realtime/README.md,sha256=5YCYXH5ULmlWoWo1PE9TlbHjeYgjnp-xY8ZssSFY2Vk,126
 agents/realtime/__init__.py,sha256=7qvzK8QJuHRnPHxDgDj21v8-lnSN4Uurg9znwJv_Tqg,4923
 agents/realtime/_default_tracker.py,sha256=4OMxBvD1MnZmMn6JZYKL42uWhVzvK6NdDLDfPP54d78,1765
 agents/realtime/_util.py,sha256=uawurhWKi3_twNFcZ5Yn1mVvv0RKl4IoyCSag8hGxrE,313
-agents/realtime/agent.py,sha256=xVQYVJjsbi4FpJZ8jwogfKUsguOzpWXWih6rqLZ8AgE,3745
+agents/realtime/agent.py,sha256=yZDgycnLFtJcfl7UHak5GEyL2vdBGxegfqEiuuzGPEk,4027
 agents/realtime/config.py,sha256=FMLT2BdxjOCHmBnvd35sZk68U4jEXypngMRAPkm-irk,5828
 agents/realtime/events.py,sha256=YnyXmkc2rkIAcCDoW5yxylMYeXeaq_QTlyRR5u5VsaM,5534
 agents/realtime/handoffs.py,sha256=avLFix5kEutel57IRcddssGiVHzGptOzWL9OqPaLVh8,6702
@@ -62,7 +62,7 @@ agents/realtime/model_events.py,sha256=X7UrUU_g4u5gWaf2mUesJJ-Ik1Z1QE0Z-ZP7kDmX1
 agents/realtime/model_inputs.py,sha256=OW2bn3wD5_pXLunDUf35jhG2q_bTKbC_D7Qu-83aOEA,2243
 agents/realtime/openai_realtime.py,sha256=vgzgklFcRpB9ZfsDda7DtXlBn3NF6bZdysta1DwQhrM,30120
 agents/realtime/runner.py,sha256=KfU7utmc9QFH2htIKN2IN9H-5EnB0qN9ezmvlRTnOm4,2511
-agents/realtime/session.py,sha256=OBIoEhuSAnneCBwF-JQLSnaPpqEtOcqbfvdm70icouI,23017
+agents/realtime/session.py,sha256=EmbjWBoIw-1RAPICZbWtQ5OUaZh14xPXPwjHWXDU8c4,23766
 agents/tracing/__init__.py,sha256=5HO_6na5S6EwICgwl50OMtxiIIosUrqalhvldlYvSVc,2991
 agents/tracing/create.py,sha256=xpJ4ZRnGyUDPKoVVkA_8hmdhtwOKGhSkwRco2AQIhAo,18003
 agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
@@ -97,7 +97,7 @@ agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
 agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
 agents/voice/models/openai_stt.py,sha256=LcVDS7f1pmbm--PWX-IaV9uLg9uv5_L3vSCbVnTJeGs,16864
 agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
-openai_agents-0.2.5.dist-info/METADATA,sha256=7BsygcTUO7nQ0kG_qZy2wmEZ2Fl3TxEgzuIghp2MOe8,12104
-openai_agents-0.2.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-openai_agents-0.2.5.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
-openai_agents-0.2.5.dist-info/RECORD,,
+openai_agents-0.2.7.dist-info/METADATA,sha256=AusANdnHsmV0VjQRDtmRQ3j5Ql8oT4rUKaqgZiR0Hzg,12104
+openai_agents-0.2.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+openai_agents-0.2.7.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
+openai_agents-0.2.7.dist-info/RECORD,,