openai-agents 0.0.7__py3-none-any.whl → 0.0.9__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.


agents/__init__.py CHANGED
@@ -100,6 +100,7 @@ from .tracing import (
     transcription_span,
 )
 from .usage import Usage
+from .version import __version__
 
 
 def set_default_openai_key(key: str, use_for_tracing: bool = True) -> None:
@@ -247,4 +248,5 @@ __all__ = [
     "gen_trace_id",
     "gen_span_id",
     "default_tool_error_function",
+    "__version__",
 ]
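With __version__ now re-exported from the package root, callers can read the installed SDK version directly. A minimal sketch:

    import agents

    # Prints the installed distribution version (e.g. "0.0.9"); version.py falls back to
    # "0.0.0" when the package metadata cannot be found.
    print(agents.__version__)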
agents/agent.py CHANGED
@@ -6,7 +6,7 @@ from collections.abc import Awaitable
 from dataclasses import dataclass, field
 from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, cast
 
-from typing_extensions import TypeAlias, TypedDict
+from typing_extensions import NotRequired, TypeAlias, TypedDict
 
 from .guardrail import InputGuardrail, OutputGuardrail
 from .handoffs import Handoff
@@ -44,7 +44,7 @@ ToolsToFinalOutputFunction: TypeAlias = Callable[
     MaybeAwaitable[ToolsToFinalOutputResult],
 ]
 """A function that takes a run context and a list of tool results, and returns a
-`ToolToFinalOutputResult`.
+`ToolsToFinalOutputResult`.
 """
 
 
@@ -53,6 +53,15 @@ class StopAtTools(TypedDict):
     """A list of tool names, any of which will stop the agent from running further."""
 
 
+class MCPConfig(TypedDict):
+    """Configuration for MCP servers."""
+
+    convert_schemas_to_strict: NotRequired[bool]
+    """If True, we will attempt to convert the MCP schemas to strict-mode schemas. This is a
+    best-effort conversion, so some schemas may not be convertible. Defaults to False.
+    """
+
+
 @dataclass
 class Agent(Generic[TContext]):
     """An agent is an AI model configured with instructions, tools, guardrails, handoffs and more.
@@ -119,6 +128,9 @@ class Agent(Generic[TContext]):
     longer needed.
     """
 
+    mcp_config: MCPConfig = field(default_factory=lambda: MCPConfig())
+    """Configuration for MCP servers."""
+
     input_guardrails: list[InputGuardrail[TContext]] = field(default_factory=list)
     """A list of checks that run in parallel to the agent's execution, before generating a
     response. Runs only if the agent is the first agent in the chain.
@@ -224,7 +236,8 @@ class Agent(Generic[TContext]):
 
     async def get_mcp_tools(self) -> list[Tool]:
         """Fetches the available tools from the MCP servers."""
-        return await MCPUtil.get_all_function_tools(self.mcp_servers)
+        convert_schemas_to_strict = self.mcp_config.get("convert_schemas_to_strict", False)
+        return await MCPUtil.get_all_function_tools(self.mcp_servers, convert_schemas_to_strict)
 
     async def get_all_tools(self) -> list[Tool]:
         """All agent tools, including MCP tools and function tools."""
agents/mcp/util.py CHANGED
@@ -2,6 +2,8 @@ import functools
 import json
 from typing import TYPE_CHECKING, Any
 
+from agents.strict_schema import ensure_strict_json_schema
+
 from .. import _debug
 from ..exceptions import AgentsException, ModelBehaviorError, UserError
 from ..logger import logger
@@ -19,12 +21,14 @@ class MCPUtil:
     """Set of utilities for interop between MCP and Agents SDK tools."""
 
     @classmethod
-    async def get_all_function_tools(cls, servers: list["MCPServer"]) -> list[Tool]:
+    async def get_all_function_tools(
+        cls, servers: list["MCPServer"], convert_schemas_to_strict: bool
+    ) -> list[Tool]:
         """Get all function tools from a list of MCP servers."""
         tools = []
         tool_names: set[str] = set()
         for server in servers:
-            server_tools = await cls.get_function_tools(server)
+            server_tools = await cls.get_function_tools(server, convert_schemas_to_strict)
             server_tool_names = {tool.name for tool in server_tools}
             if len(server_tool_names & tool_names) > 0:
                 raise UserError(
@@ -37,25 +41,42 @@ class MCPUtil:
         return tools
 
     @classmethod
-    async def get_function_tools(cls, server: "MCPServer") -> list[Tool]:
+    async def get_function_tools(
+        cls, server: "MCPServer", convert_schemas_to_strict: bool
+    ) -> list[Tool]:
         """Get all function tools from a single MCP server."""
 
         with mcp_tools_span(server=server.name) as span:
             tools = await server.list_tools()
             span.span_data.result = [tool.name for tool in tools]
 
-        return [cls.to_function_tool(tool, server) for tool in tools]
+        return [cls.to_function_tool(tool, server, convert_schemas_to_strict) for tool in tools]
 
     @classmethod
-    def to_function_tool(cls, tool: "MCPTool", server: "MCPServer") -> FunctionTool:
+    def to_function_tool(
+        cls, tool: "MCPTool", server: "MCPServer", convert_schemas_to_strict: bool
+    ) -> FunctionTool:
         """Convert an MCP tool to an Agents SDK function tool."""
         invoke_func = functools.partial(cls.invoke_mcp_tool, server, tool)
+        schema, is_strict = tool.inputSchema, False
+
+        # MCP spec doesn't require the inputSchema to have `properties`, but OpenAI spec does.
+        if "properties" not in schema:
+            schema["properties"] = {}
+
+        if convert_schemas_to_strict:
+            try:
+                schema = ensure_strict_json_schema(schema)
+                is_strict = True
+            except Exception as e:
+                logger.info(f"Error converting MCP schema to strict mode: {e}")
+
         return FunctionTool(
             name=tool.name,
             description=tool.description or "",
-            params_json_schema=tool.inputSchema,
+            params_json_schema=schema,
             on_invoke_tool=invoke_func,
-            strict_json_schema=False,
+            strict_json_schema=is_strict,
         )
 
     @classmethod
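A small sketch of what the new conversion path does for a tool whose inputSchema omits properties; the schema value here is made up:

    from agents.strict_schema import ensure_strict_json_schema

    # Hypothetical MCP inputSchema: MCP allows omitting "properties", the OpenAI spec does not.
    schema = {"type": "object"}
    if "properties" not in schema:
        schema["properties"] = {}

    try:
        schema = ensure_strict_json_schema(schema)
        is_strict = True
    except Exception:
        # Best-effort: fall back to a non-strict function tool, as to_function_tool does.
        is_strict = False

    print(schema, is_strict)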
agents/model_settings.py CHANGED
@@ -1,8 +1,10 @@
 from __future__ import annotations
 
-from dataclasses import dataclass
+from dataclasses import dataclass, fields, replace
 from typing import Literal
 
+from openai.types.shared import Reasoning
+
 
 @dataclass
 class ModelSettings:
@@ -30,8 +32,9 @@ class ModelSettings:
     tool_choice: Literal["auto", "required", "none"] | str | None = None
     """The tool choice to use when calling the model."""
 
-    parallel_tool_calls: bool | None = False
-    """Whether to use parallel tool calls when calling the model."""
+    parallel_tool_calls: bool | None = None
+    """Whether to use parallel tool calls when calling the model.
+    Defaults to False if not provided."""
 
     truncation: Literal["auto", "disabled"] | None = None
     """The truncation strategy to use when calling the model."""
@@ -39,18 +42,27 @@ class ModelSettings:
     max_tokens: int | None = None
     """The maximum number of output tokens to generate."""
 
+    reasoning: Reasoning | None = None
+    """Configuration options for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+    """
+
+    metadata: dict[str, str] | None = None
+    """Metadata to include with the model response call."""
+
+    store: bool | None = None
+    """Whether to store the generated model response for later retrieval.
+    Defaults to True if not provided."""
+
     def resolve(self, override: ModelSettings | None) -> ModelSettings:
         """Produce a new ModelSettings by overlaying any non-None values from the
         override on top of this instance."""
         if override is None:
             return self
-        return ModelSettings(
-            temperature=override.temperature or self.temperature,
-            top_p=override.top_p or self.top_p,
-            frequency_penalty=override.frequency_penalty or self.frequency_penalty,
-            presence_penalty=override.presence_penalty or self.presence_penalty,
-            tool_choice=override.tool_choice or self.tool_choice,
-            parallel_tool_calls=override.parallel_tool_calls or self.parallel_tool_calls,
-            truncation=override.truncation or self.truncation,
-            max_tokens=override.max_tokens or self.max_tokens,
-        )
+
+        changes = {
+            field.name: getattr(override, field.name)
+            for field in fields(self)
+            if getattr(override, field.name) is not None
+        }
+        return replace(self, **changes)
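The rewritten resolve() overlays every non-None field generically, so falsy overrides such as False or 0.0 now take effect (the old or-based merge silently dropped them). A quick sketch:

    from agents import ModelSettings

    base = ModelSettings(temperature=0.3, max_tokens=1024)
    override = ModelSettings(temperature=0.9, store=False)

    resolved = base.resolve(override)
    # Non-None override values win; everything else is kept from base:
    # resolved.temperature == 0.9, resolved.max_tokens == 1024, resolved.store is False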
agents/models/openai_chatcompletions.py CHANGED
@@ -518,6 +518,9 @@ class OpenAIChatCompletionsModel(Model):
                 f"Response format: {response_format}\n"
             )
 
+        reasoning_effort = model_settings.reasoning.effort if model_settings.reasoning else None
+        store = _Converter.get_store_param(self._get_client(), model_settings)
+
         ret = await self._get_client().chat.completions.create(
             model=self.model,
             messages=converted_messages,
@@ -532,7 +535,10 @@ class OpenAIChatCompletionsModel(Model):
             parallel_tool_calls=parallel_tool_calls,
             stream=stream,
             stream_options={"include_usage": True} if stream else NOT_GIVEN,
+            store=self._non_null_or_not_given(store),
+            reasoning_effort=self._non_null_or_not_given(reasoning_effort),
             extra_headers=_HEADERS,
+            metadata=self._non_null_or_not_given(model_settings.metadata),
         )
 
         if isinstance(ret, ChatCompletion):
@@ -551,6 +557,7 @@ class OpenAIChatCompletionsModel(Model):
             temperature=model_settings.temperature,
             tools=[],
             parallel_tool_calls=parallel_tool_calls or False,
+            reasoning=model_settings.reasoning,
         )
         return response, ret
 
@@ -561,6 +568,12 @@ class OpenAIChatCompletionsModel(Model):
 
 
 class _Converter:
+    @classmethod
+    def get_store_param(cls, client: AsyncOpenAI, model_settings: ModelSettings) -> bool | None:
+        # Match the behavior of Responses where store is True when not given
+        default_store = True if str(client.base_url).startswith("https://api.openai.com") else None
+        return model_settings.store if model_settings.store is not None else default_store
+
     @classmethod
     def convert_tool_choice(
         cls, tool_choice: Literal["auto", "required", "none"] | str | None
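A sketch of how the new store default might resolve for different clients; both client instances below are made up:

    from openai import AsyncOpenAI

    from agents import ModelSettings
    from agents.models.openai_chatcompletions import _Converter

    openai_client = AsyncOpenAI(api_key="sk-...")  # default base_url is https://api.openai.com/v1
    proxy_client = AsyncOpenAI(api_key="x", base_url="http://localhost:8000/v1")

    _Converter.get_store_param(openai_client, ModelSettings())             # True, matching Responses
    _Converter.get_store_param(proxy_client, ModelSettings())              # None, so store is omitted
    _Converter.get_store_param(openai_client, ModelSettings(store=False))  # False, explicit value wins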
@@ -919,12 +932,13 @@ class _Converter:
            elif func_call := cls.maybe_function_tool_call(item):
                asst = ensure_assistant_message()
                tool_calls = list(asst.get("tool_calls", []))
+               arguments = func_call["arguments"] if func_call["arguments"] else "{}"
                new_tool_call = ChatCompletionMessageToolCallParam(
                    id=func_call["call_id"],
                    type="function",
                    function={
                        "name": func_call["name"],
-                       "arguments": func_call["arguments"],
+                       "arguments": arguments,
                    },
                )
                tool_calls.append(new_tool_call)
@@ -967,7 +981,7 @@ class ToolConverter:
        }
 
        raise UserError(
-           f"Hosted tools are not supported with the ChatCompletions API. FGot tool type: "
+           f"Hosted tools are not supported with the ChatCompletions API. Got tool type: "
            f"{type(tool)}, tool: {tool}"
        )
 
agents/models/openai_responses.py CHANGED
@@ -246,6 +246,9 @@ class OpenAIResponsesModel(Model):
            stream=stream,
            extra_headers=_HEADERS,
            text=response_format,
+           store=self._non_null_or_not_given(model_settings.store),
+           reasoning=self._non_null_or_not_given(model_settings.reasoning),
+           metadata=model_settings.metadata,
        )
 
    def _get_client(self) -> AsyncOpenAI:
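Taken together, ModelSettings.reasoning, metadata and store are now forwarded to both the Chat Completions and Responses calls. A hedged configuration sketch (model name and metadata keys are illustrative):

    from openai.types.shared import Reasoning

    from agents import Agent, ModelSettings

    agent = Agent(
        name="Researcher",
        instructions="Think carefully before answering.",
        model="o3-mini",  # illustrative reasoning model
        model_settings=ModelSettings(
            reasoning=Reasoning(effort="high"),
            metadata={"run_source": "nightly-eval"},
            store=False,
        ),
    )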
agents/strict_schema.py CHANGED
@@ -54,7 +54,7 @@ def _ensure_strict_json_schema(
    elif (
        typ == "object"
        and "additionalProperties" in json_schema
-       and json_schema["additionalProperties"] is True
+       and json_schema["additionalProperties"]
    ):
        raise UserError(
            "additionalProperties should not be set for object types. This could be because "
agents/tracing/processors.py CHANGED
@@ -182,7 +182,6 @@ class BatchTraceProcessor(TracingProcessor):
        # Track when we next *must* perform a scheduled export
        self._next_export_time = time.time() + self._schedule_delay
 
-       self._shutdown_event = threading.Event()
        self._worker_thread = threading.Thread(target=self._run, daemon=True)
        self._worker_thread.start()
 
agents/tracing/span_data.py CHANGED
@@ -236,7 +236,7 @@ class TranscriptionSpanData(SpanData):
 
 
 class SpeechSpanData(SpanData):
-    __slots__ = ("input", "output", "model", "model_config", "first_byte_at")
+    __slots__ = ("input", "output", "model", "model_config", "first_content_at")
 
    def __init__(
        self,
agents/version.py CHANGED
@@ -1,7 +1,7 @@
 import importlib.metadata
 
 try:
-    __version__ = importlib.metadata.version("agents")
+    __version__ = importlib.metadata.version("openai-agents")
 except importlib.metadata.PackageNotFoundError:
     # Fallback if running from source without being installed
     __version__ = "0.0.0"
openai_agents-0.0.9.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.0.7
+Version: 0.0.9
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://github.com/openai/openai-agents-python
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -19,7 +19,7 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Classifier: Typing :: Typed
 Requires-Python: >=3.9
 Requires-Dist: griffe<2,>=1.5.6
-Requires-Dist: mcp; python_version >= '3.10'
+Requires-Dist: mcp<2,>=1.6.0; python_version >= '3.10'
 Requires-Dist: openai>=1.66.5
 Requires-Dist: pydantic<3,>=2.10
 Requires-Dist: requests<3,>=2.0
openai_agents-0.0.9.dist-info/RECORD CHANGED
@@ -1,8 +1,8 @@
-agents/__init__.py,sha256=FRMQBdNZiprYebYm2M89-iZnTeeIgcaDvC37G-gHxfA,6802
+agents/__init__.py,sha256=gA9s_CXBfe0jaEa1iWPmDG3weoHwin_KQ1XAgaVmHzw,6854
 agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
 agents/_debug.py,sha256=7OKys2lDjeCtGggTkM53m_8vw0WIr3yt-_JPBDAnsw0,608
 agents/_run_impl.py,sha256=QVTLbSydSaXWmaUuXLsZXD0Iktu2fDqBeh965cA416g,34155
-agents/agent.py,sha256=-gnR5pORVfjlybmAgyC_sRU_OU8DZCmYR_ATUpNHgd4,9501
+agents/agent.py,sha256=DJ5ab5VmSraMfxTgbqo5BJIy7Uz1kkMEOCziLqKO6tQ,10056
 agents/agent_output.py,sha256=sUlsur0_C2pPokyvspo5gxIkM0PtcNxdbZmeu_6Z4TE,5379
 agents/computer.py,sha256=XD44UgiUWSfniv-xKwwDP6wFKVwBiZkpaL1hO-0-7ZA,2516
 agents/exceptions.py,sha256=F3AltRt27PGdhbFqKBhRJL9eHqoN4SQx7oxBn0GWmhs,1856
@@ -12,38 +12,38 @@ agents/handoffs.py,sha256=wRg-HBGKBZev88mOg_mfv6CR8T2kewZM8eX3tb71l1g,9043
 agents/items.py,sha256=xCoX-ZcUUs3WHN90_o8PQSnX8jt8oQ2TJPz7k74ooQ4,8182
 agents/lifecycle.py,sha256=wYFG6PLSKQ7bICKVbB8oGtdoJNINGq9obh2RSKlAkDE,2938
 agents/logger.py,sha256=p_ef7vWKpBev5FFybPJjhrCCQizK08Yy1A2EDO1SNNg,60
-agents/model_settings.py,sha256=4JOqsLswjdrEszNqNEJ_dYjxUMCyt68hOIdgxlXELw0,2169
+agents/model_settings.py,sha256=D42v6BGlWJ3-QcHLxhWVQ-tRVTQDKWk7QgfqgQZxFNo,2292
 agents/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 agents/result.py,sha256=JOscHoh2EIUY4w-ESO500Z3DnNYq67vtkRrWr70fOr4,8421
 agents/run.py,sha256=SHi8PgBIUkNsd5tPy0QKh240bvrDJUQrExNFfV9FPyY,38664
 agents/run_context.py,sha256=vuSUQM8O4CLensQY27-22fOqECnw7yvwL9U3WO8b_bk,851
 agents/stream_events.py,sha256=ULgBEcL_H4vklZoxhpY2yomeoxVF0UiXvswsFsjFv4s,1547
-agents/strict_schema.py,sha256=FEyEvF3ZjxIHRLmraBGZyjJjuFiPCZGaCFV22LlwaTQ,5783
+agents/strict_schema.py,sha256=_KuEJkglmq-Fj3HSeYP4WqTvqrxbSKu6gezfz5Brhh0,5775
 agents/tool.py,sha256=XKeR1khfbaPbyO8DiGsn8WMO_Hkbrmm9NQzGeRsKcPs,11641
 agents/usage.py,sha256=-MZOmSDVdWxA2V_yVVnmUcwVcLdvYFccv0HXZ7Ow3_A,733
-agents/version.py,sha256=bkeg2DaYBS8OnV7R7J6OuF5pNA__0mJ4QZsJjC1DTI0,223
+agents/version.py,sha256=_1knUwzSK-HUeZTpRUkk6Z-CIcurqXuEplbV5TLJ08E,230
 agents/extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agents/extensions/handoff_filters.py,sha256=2cXxu1JROez96CpTiGuT9PIuaIrIE8ksP01fX83krKM,1977
 agents/extensions/handoff_prompt.py,sha256=oGWN0uNh3Z1L7E-Ev2up8W084fFrDNOsLDy7P6bcmic,1006
 agents/extensions/visualization.py,sha256=bHtrkqwapHsp9z3hYfidAJXdhsKnW2KioisQcHRxgzM,4242
 agents/mcp/__init__.py,sha256=x-4ZFiXNyJPn9Nbwcai6neKgonyRJ7by67HxnOLPgrw,359
 agents/mcp/server.py,sha256=qbeFEPg2xiUvNKfUlA8qyfDeFsv2yXAJabLG2GhfExQ,11269
-agents/mcp/util.py,sha256=RY9_j72OYtAHS702v3WaDoh7BbKA63yBmMnyaQ4wcSM,4494
+agents/mcp/util.py,sha256=dIEdYDMc7Sjp-DFQnvoc4VWU-B7Heyx0I41bcW7RlEg,5232
 agents/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agents/models/_openai_shared.py,sha256=4Ngwo2Fv2RXY61Pqck1cYPkSln2tDnb8Ai-ao4QG-iE,836
 agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
 agents/models/interface.py,sha256=dgIlKyPaCbNRTHXxd6x7OQwJuAelG3F-C19P-aacHWQ,3129
-agents/models/openai_chatcompletions.py,sha256=KWeVVZFYO-jUZmBcl9UgR_tpdI0u_413g25jVwHYiro,39375
+agents/models/openai_chatcompletions.py,sha256=hd_VN_4UcObOiKlW8-whTCVLd9enI46vsB_FCTNXCAA,40261
 agents/models/openai_provider.py,sha256=NMxTNaoTa329GrA7jj51LC02pb_e2eFh-PCvWADJrkY,3478
-agents/models/openai_responses.py,sha256=Vq6TjvWNffmNtWjl2Mmb_H0fo2XHwah3l-kVfd_rgPQ,13492
+agents/models/openai_responses.py,sha256=426m0pvVpxWabzVDirzZunprxvV0DxR0j8pm7pr6cmU,13684
 agents/tracing/__init__.py,sha256=-hJeEiNvgyQdEXpFTrr_qu_XYREvIrF5KyePDtovSak,2804
 agents/tracing/create.py,sha256=kkMf2pp5Te20YkiSvf3Xj3J9qMibQCjEAxZs1Lr_kTE,18124
 agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
 agents/tracing/processor_interface.py,sha256=wNyZCwNJko5CrUIWD_lMou5ppQ67CFYwvWRsJRM3up8,1659
-agents/tracing/processors.py,sha256=XBCNY9J89NvzPkgDD5D3o1ItMIL8ALzwpwM_9oe8wbo,10135
+agents/tracing/processors.py,sha256=7FDXVhWj_hk2jv88cFUalF2lqv4KXnruJ74MjS03Euw,10086
 agents/tracing/scope.py,sha256=84gOESqFfR2E_XCZsT11DLyR-3UTyqxHrfBBjH1Ic44,1373
 agents/tracing/setup.py,sha256=1wRMIVnsMOx5nWWnldqbTXg44a7-ABcC0jZK4q4I-S8,6729
-agents/tracing/span_data.py,sha256=I5TSTnXWa1c71wL2Zr7ITaFwTVnCJiTKkpCqy88juJY,7657
+agents/tracing/span_data.py,sha256=oshubzoDozcWJn1li3B9WA9cdYMrSdvYWDWaI0-OggI,7660
 agents/tracing/spans.py,sha256=6vVzocGMsdgIma1ksqkBZmhar91xj4RpgcpUC3iibqg,6606
 agents/tracing/traces.py,sha256=G5LlECSK-DBRFP-bjT8maZjBQulz6SaHILYauUVlfq8,4775
 agents/tracing/util.py,sha256=x5tAw2YBKggwQ8rH5NG8GiJrFOnPErlJPk7oicBO1dA,501
@@ -69,7 +69,7 @@ agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
 agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
 agents/voice/models/openai_stt.py,sha256=rRsldkvkPhH4T0waX1dhccEqIwmPYh-teK_LRvBgiNI,16882
 agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
-openai_agents-0.0.7.dist-info/METADATA,sha256=576_zSIWkxKLdJ3fNb11imj2acy3Vy_QZb_PBgycnM8,8123
-openai_agents-0.0.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-openai_agents-0.0.7.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
-openai_agents-0.0.7.dist-info/RECORD,,
+openai_agents-0.0.9.dist-info/METADATA,sha256=R8O4XBe-4EjWJhLJWc-K5Kgck34NOUJEXjIGQuLb9a0,8133
+openai_agents-0.0.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+openai_agents-0.0.9.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
+openai_agents-0.0.9.dist-info/RECORD,,