openai-agents 0.0.15__py3-none-any.whl → 0.0.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of openai-agents might be problematic. Click here for more details.

agents/run.py CHANGED
@@ -26,6 +26,7 @@ from .exceptions import (
26
26
  MaxTurnsExceeded,
27
27
  ModelBehaviorError,
28
28
  OutputGuardrailTripwireTriggered,
29
+ RunErrorDetails,
29
30
  )
30
31
  from .guardrail import InputGuardrail, InputGuardrailResult, OutputGuardrail, OutputGuardrailResult
31
32
  from .handoffs import Handoff, HandoffInputFilter, handoff
@@ -180,6 +181,8 @@ class Runner:
180
181
 
181
182
  try:
182
183
  while True:
184
+ all_tools = await cls._get_all_tools(current_agent, context_wrapper)
185
+
183
186
  # Start an agent span if we don't have one. This span is ended if the current
184
187
  # agent changes, or if the agent loop ends.
185
188
  if current_span is None:
@@ -195,8 +198,6 @@ class Runner:
195
198
  output_type=output_type_name,
196
199
  )
197
200
  current_span.start(mark_as_current=True)
198
-
199
- all_tools = await cls._get_all_tools(current_agent)
200
201
  current_span.span_data.tools = [t.name for t in all_tools]
201
202
 
202
203
  current_turn += 1
@@ -283,6 +284,17 @@ class Runner:
283
284
  raise AgentsException(
284
285
  f"Unknown next step type: {type(turn_result.next_step)}"
285
286
  )
287
+ except AgentsException as exc:
288
+ exc.run_data = RunErrorDetails(
289
+ input=original_input,
290
+ new_items=generated_items,
291
+ raw_responses=model_responses,
292
+ last_agent=current_agent,
293
+ context_wrapper=context_wrapper,
294
+ input_guardrail_results=input_guardrail_results,
295
+ output_guardrail_results=[],
296
+ )
297
+ raise
286
298
  finally:
287
299
  if current_span:
288
300
  current_span.finish(reset_current=True)
@@ -513,6 +525,8 @@ class Runner:
513
525
  if streamed_result.is_complete:
514
526
  break
515
527
 
528
+ all_tools = await cls._get_all_tools(current_agent, context_wrapper)
529
+
516
530
  # Start an agent span if we don't have one. This span is ended if the current
517
531
  # agent changes, or if the agent loop ends.
518
532
  if current_span is None:
@@ -528,8 +542,6 @@ class Runner:
528
542
  output_type=output_type_name,
529
543
  )
530
544
  current_span.start(mark_as_current=True)
531
-
532
- all_tools = await cls._get_all_tools(current_agent)
533
545
  tool_names = [t.name for t in all_tools]
534
546
  current_span.span_data.tools = tool_names
535
547
  current_turn += 1
@@ -609,6 +621,19 @@ class Runner:
609
621
  streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
610
622
  elif isinstance(turn_result.next_step, NextStepRunAgain):
611
623
  pass
624
+ except AgentsException as exc:
625
+ streamed_result.is_complete = True
626
+ streamed_result._event_queue.put_nowait(QueueCompleteSentinel())
627
+ exc.run_data = RunErrorDetails(
628
+ input=streamed_result.input,
629
+ new_items=streamed_result.new_items,
630
+ raw_responses=streamed_result.raw_responses,
631
+ last_agent=current_agent,
632
+ context_wrapper=context_wrapper,
633
+ input_guardrail_results=streamed_result.input_guardrail_results,
634
+ output_guardrail_results=streamed_result.output_guardrail_results,
635
+ )
636
+ raise
612
637
  except Exception as e:
613
638
  if current_span:
614
639
  _error_tracing.attach_error_to_span(
@@ -689,6 +714,8 @@ class Runner:
689
714
  input_tokens=event.response.usage.input_tokens,
690
715
  output_tokens=event.response.usage.output_tokens,
691
716
  total_tokens=event.response.usage.total_tokens,
717
+ input_tokens_details=event.response.usage.input_tokens_details,
718
+ output_tokens_details=event.response.usage.output_tokens_details,
692
719
  )
693
720
  if event.response.usage
694
721
  else Usage()
@@ -953,8 +980,10 @@ class Runner:
953
980
  return handoffs
954
981
 
955
982
  @classmethod
956
- async def _get_all_tools(cls, agent: Agent[Any]) -> list[Tool]:
957
- return await agent.get_all_tools()
983
+ async def _get_all_tools(
984
+ cls, agent: Agent[Any], context_wrapper: RunContextWrapper[Any]
985
+ ) -> list[Tool]:
986
+ return await agent.get_all_tools(context_wrapper)
958
987
 
959
988
  @classmethod
960
989
  def _get_model(cls, agent: Agent[Any], run_config: RunConfig) -> Model:
agents/stream_events.py CHANGED
@@ -31,10 +31,13 @@ class RunItemStreamEvent:
31
31
  name: Literal[
32
32
  "message_output_created",
33
33
  "handoff_requested",
34
+ # This is misspelled, but we can't change it because that would be a breaking change
34
35
  "handoff_occured",
35
36
  "tool_called",
36
37
  "tool_output",
37
38
  "reasoning_item_created",
39
+ "mcp_approval_requested",
40
+ "mcp_list_tools",
38
41
  ]
39
42
  """The name of the event."""
40
43
 
agents/tool.py CHANGED
@@ -4,12 +4,14 @@ import inspect
4
4
  import json
5
5
  from collections.abc import Awaitable
6
6
  from dataclasses import dataclass
7
- from typing import Any, Callable, Literal, Union, overload
7
+ from typing import TYPE_CHECKING, Any, Callable, Literal, Union, overload
8
8
 
9
9
  from openai.types.responses.file_search_tool_param import Filters, RankingOptions
10
+ from openai.types.responses.response_output_item import LocalShellCall, McpApprovalRequest
11
+ from openai.types.responses.tool_param import CodeInterpreter, ImageGeneration, Mcp
10
12
  from openai.types.responses.web_search_tool_param import UserLocation
11
13
  from pydantic import ValidationError
12
- from typing_extensions import Concatenate, ParamSpec
14
+ from typing_extensions import Concatenate, NotRequired, ParamSpec, TypedDict
13
15
 
14
16
  from . import _debug
15
17
  from .computer import AsyncComputer, Computer
@@ -22,6 +24,9 @@ from .tracing import SpanError
22
24
  from .util import _error_tracing
23
25
  from .util._types import MaybeAwaitable
24
26
 
27
+ if TYPE_CHECKING:
28
+ from .agent import Agent
29
+
25
30
  ToolParams = ParamSpec("ToolParams")
26
31
 
27
32
  ToolFunctionWithoutContext = Callable[ToolParams, Any]
@@ -72,6 +77,11 @@ class FunctionTool:
72
77
  """Whether the JSON schema is in strict mode. We **strongly** recommend setting this to True,
73
78
  as it increases the likelihood of correct JSON input."""
74
79
 
80
+ is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[Any]], MaybeAwaitable[bool]] = True
81
+ """Whether the tool is enabled. Either a bool or a Callable that takes the run context and agent
82
+ and returns whether the tool is enabled. You can use this to dynamically enable/disable a tool
83
+ based on your context/state."""
84
+
75
85
 
76
86
  @dataclass
77
87
  class FileSearchTool:
@@ -130,7 +140,115 @@ class ComputerTool:
130
140
  return "computer_use_preview"
131
141
 
132
142
 
133
- Tool = Union[FunctionTool, FileSearchTool, WebSearchTool, ComputerTool]
143
+ @dataclass
144
+ class MCPToolApprovalRequest:
145
+ """A request to approve a tool call."""
146
+
147
+ ctx_wrapper: RunContextWrapper[Any]
148
+ """The run context."""
149
+
150
+ data: McpApprovalRequest
151
+ """The data from the MCP tool approval request."""
152
+
153
+
154
+ class MCPToolApprovalFunctionResult(TypedDict):
155
+ """The result of an MCP tool approval function."""
156
+
157
+ approve: bool
158
+ """Whether to approve the tool call."""
159
+
160
+ reason: NotRequired[str]
161
+ """An optional reason, if rejected."""
162
+
163
+
164
+ MCPToolApprovalFunction = Callable[
165
+ [MCPToolApprovalRequest], MaybeAwaitable[MCPToolApprovalFunctionResult]
166
+ ]
167
+ """A function that approves or rejects a tool call."""
168
+
169
+
170
+ @dataclass
171
+ class HostedMCPTool:
172
+ """A tool that allows the LLM to use a remote MCP server. The LLM will automatically list and
173
+ call tools, without requiring a round trip back to your code.
174
+ If you want to run MCP servers locally via stdio, in a VPC or other non-publicly-accessible
175
+ environment, or you just prefer to run tool calls locally, then you can instead use the servers
176
+ in `agents.mcp` and pass `Agent(mcp_servers=[...])` to the agent."""
177
+
178
+ tool_config: Mcp
179
+ """The MCP tool config, which includes the server URL and other settings."""
180
+
181
+ on_approval_request: MCPToolApprovalFunction | None = None
182
+ """An optional function that will be called if approval is requested for an MCP tool. If not
183
+ provided, you will need to manually add approvals/rejections to the input and call
184
+ `Runner.run(...)` again."""
185
+
186
+ @property
187
+ def name(self):
188
+ return "hosted_mcp"
189
+
190
+
191
+ @dataclass
192
+ class CodeInterpreterTool:
193
+ """A tool that allows the LLM to execute code in a sandboxed environment."""
194
+
195
+ tool_config: CodeInterpreter
196
+ """The tool config, which includes the container and other settings."""
197
+
198
+ @property
199
+ def name(self):
200
+ return "code_interpreter"
201
+
202
+
203
+ @dataclass
204
+ class ImageGenerationTool:
205
+ """A tool that allows the LLM to generate images."""
206
+
207
+ tool_config: ImageGeneration
208
+ """The tool config, which includes image generation settings."""
209
+
210
+ @property
211
+ def name(self):
212
+ return "image_generation"
213
+
214
+
215
+ @dataclass
216
+ class LocalShellCommandRequest:
217
+ """A request to execute a command on a shell."""
218
+
219
+ ctx_wrapper: RunContextWrapper[Any]
220
+ """The run context."""
221
+
222
+ data: LocalShellCall
223
+ """The data from the local shell tool call."""
224
+
225
+
226
+ LocalShellExecutor = Callable[[LocalShellCommandRequest], MaybeAwaitable[str]]
227
+ """A function that executes a command on a shell."""
228
+
229
+
230
+ @dataclass
231
+ class LocalShellTool:
232
+ """A tool that allows the LLM to execute commands on a shell."""
233
+
234
+ executor: LocalShellExecutor
235
+ """A function that executes a command on a shell."""
236
+
237
+ @property
238
+ def name(self):
239
+ return "local_shell"
240
+
241
+
242
+ Tool = Union[
243
+ FunctionTool,
244
+ FileSearchTool,
245
+ WebSearchTool,
246
+ ComputerTool,
247
+ HostedMCPTool,
248
+ LocalShellTool,
249
+ ImageGenerationTool,
250
+ CodeInterpreterTool,
251
+ ]
134
252
  """A tool that can be used in an agent."""
135
253
 
136
254
 
@@ -152,6 +270,7 @@ def function_tool(
152
270
  use_docstring_info: bool = True,
153
271
  failure_error_function: ToolErrorFunction | None = None,
154
272
  strict_mode: bool = True,
273
+ is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[Any]], MaybeAwaitable[bool]] = True,
155
274
  ) -> FunctionTool:
156
275
  """Overload for usage as @function_tool (no parentheses)."""
157
276
  ...
@@ -166,6 +285,7 @@ def function_tool(
166
285
  use_docstring_info: bool = True,
167
286
  failure_error_function: ToolErrorFunction | None = None,
168
287
  strict_mode: bool = True,
288
+ is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[Any]], MaybeAwaitable[bool]] = True,
169
289
  ) -> Callable[[ToolFunction[...]], FunctionTool]:
170
290
  """Overload for usage as @function_tool(...)."""
171
291
  ...
@@ -180,6 +300,7 @@ def function_tool(
180
300
  use_docstring_info: bool = True,
181
301
  failure_error_function: ToolErrorFunction | None = default_tool_error_function,
182
302
  strict_mode: bool = True,
303
+ is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[Any]], MaybeAwaitable[bool]] = True,
183
304
  ) -> FunctionTool | Callable[[ToolFunction[...]], FunctionTool]:
184
305
  """
185
306
  Decorator to create a FunctionTool from a function. By default, we will:
@@ -208,6 +329,9 @@ def function_tool(
208
329
  If False, it allows non-strict JSON schemas. For example, if a parameter has a default
209
330
  value, it will be optional, additional properties are allowed, etc. See here for more:
210
331
  https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses#supported-schemas
332
+ is_enabled: Whether the tool is enabled. Can be a bool or a callable that takes the run
333
+ context and agent and returns whether the tool is enabled. Disabled tools are hidden
334
+ from the LLM at runtime.
211
335
  """
212
336
 
213
337
  def _create_function_tool(the_func: ToolFunction[...]) -> FunctionTool:
@@ -297,6 +421,7 @@ def function_tool(
297
421
  params_json_schema=schema.params_json_schema,
298
422
  on_invoke_tool=_on_invoke_tool,
299
423
  strict_json_schema=strict_mode,
424
+ is_enabled=is_enabled,
300
425
  )
301
426
 
302
427
  # If func is actually a callable, we were used as @function_tool with no parentheses
@@ -188,10 +188,27 @@ class BatchTraceProcessor(TracingProcessor):
188
188
  # Track when we next *must* perform a scheduled export
189
189
  self._next_export_time = time.time() + self._schedule_delay
190
190
 
191
- self._worker_thread = threading.Thread(target=self._run, daemon=True)
192
- self._worker_thread.start()
191
+ # We lazily start the background worker thread the first time a span/trace is queued.
192
+ self._worker_thread: threading.Thread | None = None
193
+ self._thread_start_lock = threading.Lock()
194
+
195
+ def _ensure_thread_started(self) -> None:
196
+ # Fast path without holding the lock
197
+ if self._worker_thread and self._worker_thread.is_alive():
198
+ return
199
+
200
+ # Double-checked locking to avoid starting multiple threads
201
+ with self._thread_start_lock:
202
+ if self._worker_thread and self._worker_thread.is_alive():
203
+ return
204
+
205
+ self._worker_thread = threading.Thread(target=self._run, daemon=True)
206
+ self._worker_thread.start()
193
207
 
194
208
  def on_trace_start(self, trace: Trace) -> None:
209
+ # Ensure the background worker is running before we enqueue anything.
210
+ self._ensure_thread_started()
211
+
195
212
  try:
196
213
  self._queue.put_nowait(trace)
197
214
  except queue.Full:
@@ -206,6 +223,9 @@ class BatchTraceProcessor(TracingProcessor):
206
223
  pass
207
224
 
208
225
  def on_span_end(self, span: Span[Any]) -> None:
226
+ # Ensure the background worker is running before we enqueue anything.
227
+ self._ensure_thread_started()
228
+
209
229
  try:
210
230
  self._queue.put_nowait(span)
211
231
  except queue.Full:
@@ -216,7 +236,13 @@ class BatchTraceProcessor(TracingProcessor):
216
236
  Called when the application stops. We signal our thread to stop, then join it.
217
237
  """
218
238
  self._shutdown_event.set()
219
- self._worker_thread.join(timeout=timeout)
239
+
240
+ # Only join if we ever started the background thread; otherwise flush synchronously.
241
+ if self._worker_thread and self._worker_thread.is_alive():
242
+ self._worker_thread.join(timeout=timeout)
243
+ else:
244
+ # No background thread: process any remaining items synchronously.
245
+ self._export_batches(force=True)
220
246
 
221
247
  def force_flush(self):
222
248
  """
agents/usage.py CHANGED
@@ -1,4 +1,6 @@
1
- from dataclasses import dataclass
1
+ from dataclasses import dataclass, field
2
+
3
+ from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
2
4
 
3
5
 
4
6
  @dataclass
@@ -9,9 +11,18 @@ class Usage:
9
11
  input_tokens: int = 0
10
12
  """Total input tokens sent, across all requests."""
11
13
 
14
+ input_tokens_details: InputTokensDetails = field(
15
+ default_factory=lambda: InputTokensDetails(cached_tokens=0)
16
+ )
17
+ """Details about the input tokens, matching responses API usage details."""
12
18
  output_tokens: int = 0
13
19
  """Total output tokens received, across all requests."""
14
20
 
21
+ output_tokens_details: OutputTokensDetails = field(
22
+ default_factory=lambda: OutputTokensDetails(reasoning_tokens=0)
23
+ )
24
+ """Details about the output tokens, matching responses API usage details."""
25
+
15
26
  total_tokens: int = 0
16
27
  """Total tokens sent and received, across all requests."""
17
28
 
@@ -20,3 +31,12 @@ class Usage:
20
31
  self.input_tokens += other.input_tokens if other.input_tokens else 0
21
32
  self.output_tokens += other.output_tokens if other.output_tokens else 0
22
33
  self.total_tokens += other.total_tokens if other.total_tokens else 0
34
+ self.input_tokens_details = InputTokensDetails(
35
+ cached_tokens=self.input_tokens_details.cached_tokens
36
+ + other.input_tokens_details.cached_tokens
37
+ )
38
+
39
+ self.output_tokens_details = OutputTokensDetails(
40
+ reasoning_tokens=self.output_tokens_details.reasoning_tokens
41
+ + other.output_tokens_details.reasoning_tokens
42
+ )
@@ -3,6 +3,7 @@ from typing import TYPE_CHECKING
3
3
  from pydantic import BaseModel
4
4
 
5
5
  if TYPE_CHECKING:
6
+ from ..exceptions import RunErrorDetails
6
7
  from ..result import RunResult, RunResultBase, RunResultStreaming
7
8
 
8
9
 
@@ -38,6 +39,17 @@ def pretty_print_result(result: "RunResult") -> str:
38
39
  return output
39
40
 
40
41
 
42
+ def pretty_print_run_error_details(result: "RunErrorDetails") -> str:
43
+ output = "RunErrorDetails:"
44
+ output += f'\n- Last agent: Agent(name="{result.last_agent.name}", ...)'
45
+ output += f"\n- {len(result.new_items)} new item(s)"
46
+ output += f"\n- {len(result.raw_responses)} raw response(s)"
47
+ output += f"\n- {len(result.input_guardrail_results)} input guardrail result(s)"
48
+ output += "\n(See `RunErrorDetails` for more details)"
49
+
50
+ return output
51
+
52
+
41
53
  def pretty_print_run_result_streaming(result: "RunResultStreaming") -> str:
42
54
  output = "RunResultStreaming:"
43
55
  output += f'\n- Current agent: Agent(name="{result.current_agent.name}", ...)'
agents/voice/model.py CHANGED
@@ -17,9 +17,11 @@ DEFAULT_TTS_BUFFER_SIZE = 120
17
17
  TTSVoice = Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"]
18
18
  """Exportable type for the TTSModelSettings voice enum"""
19
19
 
20
+
20
21
  @dataclass
21
22
  class TTSModelSettings:
22
23
  """Settings for a TTS model."""
24
+
23
25
  voice: TTSVoice | None = None
24
26
  """
25
27
  The voice to use for the TTS model. If not provided, the default voice for the respective model
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: openai-agents
3
- Version: 0.0.15
3
+ Version: 0.0.17
4
4
  Summary: OpenAI Agents SDK
5
5
  Project-URL: Homepage, https://github.com/openai/openai-agents-python
6
6
  Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -20,7 +20,7 @@ Classifier: Typing :: Typed
20
20
  Requires-Python: >=3.9
21
21
  Requires-Dist: griffe<2,>=1.5.6
22
22
  Requires-Dist: mcp<2,>=1.8.0; python_version >= '3.10'
23
- Requires-Dist: openai>=1.76.0
23
+ Requires-Dist: openai>=1.81.0
24
24
  Requires-Dist: pydantic<3,>=2.10
25
25
  Requires-Dist: requests<3,>=2.0
26
26
  Requires-Dist: types-requests<3,>=2.0
@@ -1,53 +1,53 @@
1
- agents/__init__.py,sha256=T0Irpxfnd2SDSQ0aD5YvhwtAGTnzybnFb7qcvSsO5qY,6906
1
+ agents/__init__.py,sha256=ZnZazUPdfh19uNOgNyu2OBQr5zz2DdUrKgam3Y9BAk4,7438
2
2
  agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
3
3
  agents/_debug.py,sha256=7OKys2lDjeCtGggTkM53m_8vw0WIr3yt-_JPBDAnsw0,608
4
- agents/_run_impl.py,sha256=7gKaBEDwQs7DqlcC3hSbU1I04wisvClXE0c3pZ9sGEM,34167
5
- agents/agent.py,sha256=aTC49v9sQJm0gv5a3hW8xCgtMhk2TfjycBP8JyeOJ84,10571
6
- agents/agent_output.py,sha256=fEK1Yn0XfMrLXZRsBcSig-YDZZ0kZpCgATdwZ-eHYqQ,7127
4
+ agents/_run_impl.py,sha256=DyIodrzaWNdydZWDKJT6wGg3v445jwBUOwxb5mM-c58,42742
5
+ agents/agent.py,sha256=eeOWjR-a0xOB4Ctt9OTl93rEr_VRAkynN2M0vfx2nTs,11195
6
+ agents/agent_output.py,sha256=cVIVwpsgOfloCHL0BD9DSCBCzW_s3T4LesDhvJRu2Uc,7127
7
7
  agents/computer.py,sha256=XD44UgiUWSfniv-xKwwDP6wFKVwBiZkpaL1hO-0-7ZA,2516
8
- agents/exceptions.py,sha256=F3AltRt27PGdhbFqKBhRJL9eHqoN4SQx7oxBn0GWmhs,1856
8
+ agents/exceptions.py,sha256=NHMdHE0cZ6AdA6UgUylTzVHAX05Ol1CkO814a0FdZcs,2862
9
9
  agents/function_schema.py,sha256=k4GTdxf5bvcisVr9b4xSdTGzkB5oP3XZnJMdouABCsw,12909
10
10
  agents/guardrail.py,sha256=vWWcApo9s_6aHapQ5AMko08MqC8Jrlk-J5iqIRctCDQ,9291
11
- agents/handoffs.py,sha256=wRg-HBGKBZev88mOg_mfv6CR8T2kewZM8eX3tb71l1g,9043
12
- agents/items.py,sha256=6Xnf6a2tIgM8Pz3T2Xr6J8wgok8fI-KhyKW1XdfHBJU,8306
11
+ agents/handoffs.py,sha256=mWvtgWMJjSIlhUR9xf-pXOJbWVCKxNBXytP9tsPGWII,9045
12
+ agents/items.py,sha256=lXFc_gKLEqwXIcyMKk4Q-6Rjry0MWD93xlvk4Y1W970,9695
13
13
  agents/lifecycle.py,sha256=wYFG6PLSKQ7bICKVbB8oGtdoJNINGq9obh2RSKlAkDE,2938
14
14
  agents/logger.py,sha256=p_ef7vWKpBev5FFybPJjhrCCQizK08Yy1A2EDO1SNNg,60
15
15
  agents/model_settings.py,sha256=7s9YjfHBVz1f1a-V3dd-8eMe-IAgfDXhQgChI27Kz00,3326
16
16
  agents/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
17
- agents/result.py,sha256=dhtOaLIoOp5PYC4qcsgoU5qg2yvdI_VKdCy6i2qth7k,9305
18
- agents/run.py,sha256=kPxqU6KjFzkuVdH-UbKPUhJrvl70ChMnQbfI4UDD3fw,40276
17
+ agents/result.py,sha256=YCGYHoc5X1_vLKu5QiK6F8C1ZXI3tTfLXaZoqbYgUMA,10753
18
+ agents/run.py,sha256=cGvRtw9Ck7gEthmdnUBtb82lD7y0JgIZFsjMXbkCJZY,41816
19
19
  agents/run_context.py,sha256=vuSUQM8O4CLensQY27-22fOqECnw7yvwL9U3WO8b_bk,851
20
- agents/stream_events.py,sha256=ULgBEcL_H4vklZoxhpY2yomeoxVF0UiXvswsFsjFv4s,1547
20
+ agents/stream_events.py,sha256=VFyTu-DT3ZMnHLtMbg-X_lxec0doQxNfx-hVxLB0BpI,1700
21
21
  agents/strict_schema.py,sha256=_KuEJkglmq-Fj3HSeYP4WqTvqrxbSKu6gezfz5Brhh0,5775
22
- agents/tool.py,sha256=XKeR1khfbaPbyO8DiGsn8WMO_Hkbrmm9NQzGeRsKcPs,11641
23
- agents/usage.py,sha256=-MZOmSDVdWxA2V_yVVnmUcwVcLdvYFccv0HXZ7Ow3_A,733
22
+ agents/tool.py,sha256=yDUuR6oAO2NufHoJqKtqLExGx6ClHPTYYPsdraf39P0,15675
23
+ agents/usage.py,sha256=GB83eElU-DVkdutGObGDSX5vJNy8ssu3Xbpp5LlHfwU,1643
24
24
  agents/version.py,sha256=_1knUwzSK-HUeZTpRUkk6Z-CIcurqXuEplbV5TLJ08E,230
25
25
  agents/extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
26
26
  agents/extensions/handoff_filters.py,sha256=2cXxu1JROez96CpTiGuT9PIuaIrIE8ksP01fX83krKM,1977
27
27
  agents/extensions/handoff_prompt.py,sha256=oGWN0uNh3Z1L7E-Ev2up8W084fFrDNOsLDy7P6bcmic,1006
28
- agents/extensions/visualization.py,sha256=AQFC7kQlZqTI6QVkyDHrF_DodCytrrhcLg35nfRd_JA,4256
28
+ agents/extensions/visualization.py,sha256=g2eEwW22qe3A4WtH37LwaHhK3QZE9FYHVw9IcOVpwbk,4699
29
29
  agents/extensions/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
30
- agents/extensions/models/litellm_model.py,sha256=JV9DfS6gj9QR0NMFyX4o3Dq5QqJNQ3BksBh7ijrgWoA,13776
30
+ agents/extensions/models/litellm_model.py,sha256=zcjdGI2EyhKqiXnobl_WPuPL8_zl2sGDOz7bul3Kjzs,14447
31
31
  agents/extensions/models/litellm_provider.py,sha256=wTm00Anq8YoNb9AnyT0JOunDG-HCDm_98ORNy7aNJdw,928
32
32
  agents/mcp/__init__.py,sha256=_aDpMTvYCe1IezOEasZ0vmombBM8r7BD8lpXiKi-UlM,499
33
- agents/mcp/server.py,sha256=f4F5DSH3TfMJ1SvruRqJT_wYbWhKQbRzVhdYdAjnOnk,15748
34
- agents/mcp/util.py,sha256=dIEdYDMc7Sjp-DFQnvoc4VWU-B7Heyx0I41bcW7RlEg,5232
33
+ agents/mcp/server.py,sha256=mP_JxJzz00prX_0SzTZO38bjvhj4A61icypUjvxdG4k,15915
34
+ agents/mcp/util.py,sha256=qXbAo9O-yv0JfmZBxDJIQ8ieHMTNWTEX5lnSVBv637k,5243
35
35
  agents/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
36
36
  agents/models/_openai_shared.py,sha256=4Ngwo2Fv2RXY61Pqck1cYPkSln2tDnb8Ai-ao4QG-iE,836
37
37
  agents/models/chatcmpl_converter.py,sha256=Sae-ITlhQz8_SiFiSat7Z-lavqIuczduOXR_PF_f6cs,18126
38
38
  agents/models/chatcmpl_helpers.py,sha256=eIWySobaH7I0AQijAz5i-_rtsXrSvmEHD567s_8Zw1o,1318
39
- agents/models/chatcmpl_stream_handler.py,sha256=VjskdeGnepn0iJbxsqNZrexcuAYAV1zd5hwt0lU8E7I,12452
39
+ agents/models/chatcmpl_stream_handler.py,sha256=sDl8O7AKxpWxAq7-bgCUClD5JySUnbQ8RTPc0HeDElM,13713
40
40
  agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
41
41
  agents/models/interface.py,sha256=eEpiIBn9MxsmXUK1HPpn3c7TYPduBYC7tsWnDHSYJHo,3553
42
42
  agents/models/multi_provider.py,sha256=aiDbls5G4YomPfN6qH1pGlj41WS5jlDp2T82zm6qcnM,5578
43
- agents/models/openai_chatcompletions.py,sha256=QiUOdd4gQ7f-uslm4SqRlv9bt3T1oFL87EnqVYlWw4A,10390
43
+ agents/models/openai_chatcompletions.py,sha256=aSE1cww-C-6p5PXpslo70X-V0MHqbN6msLhnawFbhJU,11445
44
44
  agents/models/openai_provider.py,sha256=NMxTNaoTa329GrA7jj51LC02pb_e2eFh-PCvWADJrkY,3478
45
- agents/models/openai_responses.py,sha256=-hwXW7gXYOs4EbVrFhsil-tWb63gtLj_vaGQ9HXf6nE,14331
45
+ agents/models/openai_responses.py,sha256=JFajISS-sYYxKhb66tZ5cYPEqIYOj6ap762Z-87c7fE,15368
46
46
  agents/tracing/__init__.py,sha256=-hJeEiNvgyQdEXpFTrr_qu_XYREvIrF5KyePDtovSak,2804
47
47
  agents/tracing/create.py,sha256=kkMf2pp5Te20YkiSvf3Xj3J9qMibQCjEAxZs1Lr_kTE,18124
48
48
  agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
49
49
  agents/tracing/processor_interface.py,sha256=wNyZCwNJko5CrUIWD_lMou5ppQ67CFYwvWRsJRM3up8,1659
50
- agents/tracing/processors.py,sha256=UVrP1UjhPoJKV-CDDVzxmw19miYK-W8T_-0sx40erZM,10259
50
+ agents/tracing/processors.py,sha256=lOdZHwo0rQAflVkKWOZinnWyLtS0stALyydiFOC0gss,11389
51
51
  agents/tracing/scope.py,sha256=u17_m8RPpGvbHrTkaO_kDi5ROBWhfOAIgBe7suiaRD4,1445
52
52
  agents/tracing/setup.py,sha256=YnEDTaRG_b510vtsXbOaCUZ0nf7MOr1ULvOpQOHtdBs,6776
53
53
  agents/tracing/span_data.py,sha256=nI2Fbu1ORE8ybE6m6RuddTJF5E5xFmEj8Mq5bSFv4bE,9017
@@ -58,7 +58,7 @@ agents/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
58
58
  agents/util/_coro.py,sha256=S38XUYFC7bqTELSgMUBsAX1GoRlIrV7coupcUAWH__4,45
59
59
  agents/util/_error_tracing.py,sha256=hdkYNx180b18lP0PSB1toE5atNHsMg_Bm9Osw812vLo,421
60
60
  agents/util/_json.py,sha256=eKeQeMlQkBXRFeL3ilNZFmszGyfhtzZdW_GW_As6dcg,972
61
- agents/util/_pretty_print.py,sha256=rRVp24UmTgzCm-W4ritWBOxxnPRinzFdrZlOhTi1KVQ,2227
61
+ agents/util/_pretty_print.py,sha256=pnrM81KRG4G21jZnYrYBCkPgtUeP8qcnJm-9tpAV1WA,2738
62
62
  agents/util/_transforms.py,sha256=CZe74NOHkHneyo4fHYfFWksCSTn-kXtEyejL9P0_xlA,270
63
63
  agents/util/_types.py,sha256=8KxYfCw0gYSMWcQmacJoc3Q7Lc46LmT-AWvhF10KJ-E,160
64
64
  agents/voice/__init__.py,sha256=4VWBUjyoXC6dGFuk-oZQGg8T32bFxVwy371c-zDK-EU,1537
@@ -66,7 +66,7 @@ agents/voice/events.py,sha256=4aPAZC0__ocgmg_mcX4c1zv9Go-YdKIVItQ2kYgtye0,1216
66
66
  agents/voice/exceptions.py,sha256=QcyfvaUTBe4gxbFP82oDSa_puzZ4Z4O4k01B8pAHnK0,233
67
67
  agents/voice/imports.py,sha256=VaE5I8aJTP9Zl_0-y9dx1UcAP7KPRDMaikFK2jFnn8s,348
68
68
  agents/voice/input.py,sha256=FSbdHMIdLVKX4vYcmf3WBJ5dAlh5zMDjCAuGfXOZTQs,2910
69
- agents/voice/model.py,sha256=haVPgL3xlzkUyMeVaDphB74TlcZpruagMsx6CQxpx5g,5954
69
+ agents/voice/model.py,sha256=LWnIWEwU0-aFkff3kbTKkxejnYqzS2XHG5Qm2YcrzFI,5956
70
70
  agents/voice/pipeline.py,sha256=5LKTTDytQt4QlZzVKgbB9x3X2zA-TeR94FTi15vIUc0,6259
71
71
  agents/voice/pipeline_config.py,sha256=_cynbnzxvQijxkGrMYHJzIV54F9bRvDsPV24qexVO8c,1759
72
72
  agents/voice/result.py,sha256=Yx9JCMGCE9OfXacaBFfFLQJRwkNo5-h4Nqm9OPnemU4,11107
@@ -76,7 +76,7 @@ agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
76
76
  agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
77
77
  agents/voice/models/openai_stt.py,sha256=rRsldkvkPhH4T0waX1dhccEqIwmPYh-teK_LRvBgiNI,16882
78
78
  agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
79
- openai_agents-0.0.15.dist-info/METADATA,sha256=aWuj1znysjWdTLEA_Qtou1aOsPBNRRVFXiPcIoJ4o9c,8163
80
- openai_agents-0.0.15.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
81
- openai_agents-0.0.15.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
82
- openai_agents-0.0.15.dist-info/RECORD,,
79
+ openai_agents-0.0.17.dist-info/METADATA,sha256=2SF0ZEolF_69dW0eTSWGuJc_RLuOAnYkqvBqj4IgqCw,8163
80
+ openai_agents-0.0.17.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
81
+ openai_agents-0.0.17.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
82
+ openai_agents-0.0.17.dist-info/RECORD,,