openai-agents 0.0.12__py3-none-any.whl → 0.0.13__py3-none-any.whl

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.

This version of openai-agents might be problematic.

agents/extensions/models/litellm_model.py CHANGED
@@ -1,6 +1,5 @@
 from __future__ import annotations
 
-import dataclasses
 import json
 import time
 from collections.abc import AsyncIterator
@@ -75,7 +74,7 @@ class LitellmModel(Model):
     ) -> ModelResponse:
         with generation_span(
             model=str(self.model),
-            model_config=dataclasses.asdict(model_settings)
+            model_config=model_settings.to_json_dict()
             | {"base_url": str(self.base_url or ""), "model_impl": "litellm"},
             disabled=tracing.is_disabled(),
         ) as span_generation:
@@ -147,7 +146,7 @@ class LitellmModel(Model):
     ) -> AsyncIterator[TResponseStreamEvent]:
         with generation_span(
             model=str(self.model),
-            model_config=dataclasses.asdict(model_settings)
+            model_config=model_settings.to_json_dict()
             | {"base_url": str(self.base_url or ""), "model_impl": "litellm"},
             disabled=tracing.is_disabled(),
         ) as span_generation:
@@ -286,7 +285,7 @@ class LitellmModel(Model):
             stream=stream,
             stream_options=stream_options,
             reasoning_effort=reasoning_effort,
-            extra_headers=HEADERS,
+            extra_headers={**HEADERS, **(model_settings.extra_headers or {})},
             api_key=self.api_key,
             base_url=self.base_url,
             **extra_kwargs,
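
Since the user-supplied mapping is unpacked last, per-request headers now override the SDK defaults on key collisions. A minimal sketch of the merge semantics (the HEADERS value here is an illustrative stand-in, not the SDK's real constant):

    HEADERS = {"User-Agent": "Agents/Python"}  # illustrative stand-in

    def merge_headers(extra_headers):
        # Later unpacking wins, so caller-supplied keys override the defaults.
        return {**HEADERS, **(extra_headers or {})}

    assert merge_headers(None) == {"User-Agent": "Agents/Python"}
    assert merge_headers({"User-Agent": "custom"})["User-Agent"] == "custom"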
agents/mcp/server.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
 import abc
 import asyncio
 from contextlib import AbstractAsyncContextManager, AsyncExitStack
+from datetime import timedelta
 from pathlib import Path
 from typing import Any, Literal
 
@@ -54,7 +55,7 @@ class MCPServer(abc.ABC):
 class _MCPServerWithClientSession(MCPServer, abc.ABC):
     """Base class for MCP servers that use a `ClientSession` to communicate with the server."""
 
-    def __init__(self, cache_tools_list: bool):
+    def __init__(self, cache_tools_list: bool, client_session_timeout_seconds: float | None):
         """
         Args:
             cache_tools_list: Whether to cache the tools list. If `True`, the tools list will be
@@ -63,12 +64,16 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
                 by calling `invalidate_tools_cache()`. You should set this to `True` if you know the
                 server will not change its tools list, because it can drastically improve latency
                 (by avoiding a round-trip to the server every time).
+
+            client_session_timeout_seconds: the read timeout passed to the MCP ClientSession.
         """
         self.session: ClientSession | None = None
         self.exit_stack: AsyncExitStack = AsyncExitStack()
         self._cleanup_lock: asyncio.Lock = asyncio.Lock()
         self.cache_tools_list = cache_tools_list
 
+        self.client_session_timeout_seconds = client_session_timeout_seconds
+
         # The cache is always dirty at startup, so that we fetch tools at least once
         self._cache_dirty = True
         self._tools_list: list[MCPTool] | None = None
@@ -101,7 +106,15 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
         try:
             transport = await self.exit_stack.enter_async_context(self.create_streams())
             read, write = transport
-            session = await self.exit_stack.enter_async_context(ClientSession(read, write))
+            session = await self.exit_stack.enter_async_context(
+                ClientSession(
+                    read,
+                    write,
+                    timedelta(seconds=self.client_session_timeout_seconds)
+                    if self.client_session_timeout_seconds
+                    else None,
+                )
+            )
             await session.initialize()
             self.session = session
         except Exception as e:
@@ -183,6 +196,7 @@ class MCPServerStdio(_MCPServerWithClientSession):
         params: MCPServerStdioParams,
         cache_tools_list: bool = False,
         name: str | None = None,
+        client_session_timeout_seconds: float | None = 5,
     ):
         """Create a new MCP server based on the stdio transport.
 
@@ -199,8 +213,9 @@ class MCPServerStdio(_MCPServerWithClientSession):
                 improve latency (by avoiding a round-trip to the server every time).
             name: A readable name for the server. If not provided, we'll create one from the
                 command.
+            client_session_timeout_seconds: the read timeout passed to the MCP ClientSession.
         """
-        super().__init__(cache_tools_list)
+        super().__init__(cache_tools_list, client_session_timeout_seconds)
 
         self.params = StdioServerParameters(
             command=params["command"],
@@ -257,6 +272,7 @@ class MCPServerSse(_MCPServerWithClientSession):
         params: MCPServerSseParams,
         cache_tools_list: bool = False,
         name: str | None = None,
+        client_session_timeout_seconds: float | None = 5,
     ):
         """Create a new MCP server based on the HTTP with SSE transport.
 
@@ -274,8 +290,10 @@ class MCPServerSse(_MCPServerWithClientSession):
 
             name: A readable name for the server. If not provided, we'll create one from the
                 URL.
+
+            client_session_timeout_seconds: the read timeout passed to the MCP ClientSession.
         """
-        super().__init__(cache_tools_list)
+        super().__init__(cache_tools_list, client_session_timeout_seconds)
 
         self.params = params
         self._name = name or f"sse: {self.params['url']}"
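
Both transports now default client_session_timeout_seconds to 5 seconds and forward it to the MCP ClientSession as a timedelta read timeout; passing None disables it. A hedged usage sketch, assuming the public MCPServerStdio entry point with explicit connect/cleanup (the filesystem server command is a placeholder):

    import asyncio

    from agents.mcp import MCPServerStdio

    async def main() -> None:
        server = MCPServerStdio(
            params={"command": "npx", "args": ["-y", "@modelcontextprotocol/server-filesystem", "."]},
            client_session_timeout_seconds=30,  # raise the 5s default for a slow server; None disables it
        )
        await server.connect()
        try:
            tools = await server.list_tools()
            print([tool.name for tool in tools])
        finally:
            await server.cleanup()

    asyncio.run(main())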
agents/model_settings.py CHANGED
@@ -1,10 +1,12 @@
 from __future__ import annotations
 
+import dataclasses
 from dataclasses import dataclass, fields, replace
-from typing import Literal
+from typing import Any, Literal
 
-from openai._types import Body, Query
+from openai._types import Body, Headers, Query
 from openai.types.shared import Reasoning
+from pydantic import BaseModel
 
 
 @dataclass
@@ -67,6 +69,10 @@ class ModelSettings:
     """Additional body fields to provide with the request.
     Defaults to None if not provided."""
 
+    extra_headers: Headers | None = None
+    """Additional headers to provide with the request.
+    Defaults to None if not provided."""
+
    def resolve(self, override: ModelSettings | None) -> ModelSettings:
        """Produce a new ModelSettings by overlaying any non-None values from the
        override on top of this instance."""
@@ -79,3 +85,16 @@ class ModelSettings:
             if getattr(override, field.name) is not None
         }
         return replace(self, **changes)
+
+    def to_json_dict(self) -> dict[str, Any]:
+        dataclass_dict = dataclasses.asdict(self)
+
+        json_dict: dict[str, Any] = {}
+
+        for field_name, value in dataclass_dict.items():
+            if isinstance(value, BaseModel):
+                json_dict[field_name] = value.model_dump(mode="json")
+            else:
+                json_dict[field_name] = value
+
+        return json_dict
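
The new to_json_dict() exists because dataclasses.asdict() recurses into dataclasses, dicts, lists, and tuples but deep-copies anything else unchanged, so a pydantic model stored on ModelSettings (such as the Reasoning field) survives as a non-JSON-serializable object in the tracing span's model_config. A self-contained sketch of the failure mode and the fix, using a stand-in Reasoning class instead of openai.types.shared.Reasoning:

    import dataclasses
    import json
    from dataclasses import dataclass
    from typing import Any, Optional

    from pydantic import BaseModel

    class Reasoning(BaseModel):  # stand-in for openai.types.shared.Reasoning
        effort: Optional[str] = None

    @dataclass
    class Settings:
        temperature: Optional[float] = None
        reasoning: Optional[Reasoning] = None

        def to_json_dict(self) -> dict[str, Any]:
            # Convert pydantic values to plain dicts; pass everything else through.
            return {
                name: value.model_dump(mode="json") if isinstance(value, BaseModel) else value
                for name, value in dataclasses.asdict(self).items()
            }

    s = Settings(temperature=0.2, reasoning=Reasoning(effort="high"))
    try:
        json.dumps(dataclasses.asdict(s))
    except TypeError as e:
        print("asdict leaves the BaseModel intact:", e)
    print(json.dumps(s.to_json_dict()))  # {"temperature": 0.2, "reasoning": {"effort": "high"}}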
agents/models/chatcmpl_stream_handler.py CHANGED
@@ -56,7 +56,8 @@ class ChatCmplStreamHandler:
                 type="response.created",
             )
 
-            usage = chunk.usage
+            # This is always set by the OpenAI API, but not by others e.g. LiteLLM
+            usage = chunk.usage if hasattr(chunk, "usage") else None
 
             if not chunk.choices or not chunk.choices[0].delta:
                 continue
@@ -112,7 +113,8 @@ class ChatCmplStreamHandler:
                     state.text_content_index_and_output[1].text += delta.content
 
             # Handle refusals (model declines to answer)
-            if delta.refusal:
+            # This is always set by the OpenAI API, but not by others e.g. LiteLLM
+            if hasattr(delta, "refusal") and delta.refusal:
                 if not state.refusal_content_index_and_output:
                     # Initialize a content tracker for streaming refusal text
                     state.refusal_content_index_and_output = (
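
The hasattr guards are needed because the stream handler receives duck-typed chunk objects: the OpenAI client always populates usage and refusal, but other providers routed through this handler (e.g. LiteLLM) may omit the attributes entirely. A tiny standalone sketch of the pattern, with made-up chunk classes:

    # Stand-in chunk shapes: one OpenAI-like, one from a provider omitting `usage`.
    class FullChunk:
        usage = {"total_tokens": 42}

    class SparseChunk:
        pass

    for chunk in (FullChunk(), SparseChunk()):
        usage = chunk.usage if hasattr(chunk, "usage") else None
        print(usage)  # {'total_tokens': 42}, then None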
agents/models/openai_chatcompletions.py CHANGED
@@ -1,6 +1,5 @@
 from __future__ import annotations
 
-import dataclasses
 import json
 import time
 from collections.abc import AsyncIterator
@@ -56,8 +55,7 @@ class OpenAIChatCompletionsModel(Model):
     ) -> ModelResponse:
         with generation_span(
             model=str(self.model),
-            model_config=dataclasses.asdict(model_settings)
-            | {"base_url": str(self._client.base_url)},
+            model_config=model_settings.to_json_dict() | {"base_url": str(self._client.base_url)},
             disabled=tracing.is_disabled(),
         ) as span_generation:
             response = await self._fetch_response(
@@ -121,8 +119,7 @@ class OpenAIChatCompletionsModel(Model):
         """
         with generation_span(
             model=str(self.model),
-            model_config=dataclasses.asdict(model_settings)
-            | {"base_url": str(self._client.base_url)},
+            model_config=model_settings.to_json_dict() | {"base_url": str(self._client.base_url)},
             disabled=tracing.is_disabled(),
         ) as span_generation:
             response, stream = await self._fetch_response(
@@ -255,7 +252,7 @@ class OpenAIChatCompletionsModel(Model):
             stream_options=self._non_null_or_not_given(stream_options),
             store=self._non_null_or_not_given(store),
             reasoning_effort=self._non_null_or_not_given(reasoning_effort),
-            extra_headers=HEADERS,
+            extra_headers={ **HEADERS, **(model_settings.extra_headers or {}) },
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,
             metadata=self._non_null_or_not_given(model_settings.metadata),
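
Together with the ModelSettings change above, callers can now attach per-request headers to any model call. A hedged usage sketch, assuming the SDK's public Agent/Runner/ModelSettings entry points (the header name is invented for illustration):

    from agents import Agent, ModelSettings, Runner

    agent = Agent(
        name="assistant",
        instructions="Reply concisely.",
        model_settings=ModelSettings(extra_headers={"X-Request-Source": "my-app"}),
    )

    result = Runner.run_sync(agent, "hello")
    print(result.final_output)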
agents/models/openai_responses.py CHANGED
@@ -253,7 +253,7 @@ class OpenAIResponsesModel(Model):
             tool_choice=tool_choice,
             parallel_tool_calls=parallel_tool_calls,
             stream=stream,
-            extra_headers=_HEADERS,
+            extra_headers={**_HEADERS, **(model_settings.extra_headers or {})},
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,
             text=response_format,
agents/result.py CHANGED
@@ -75,7 +75,9 @@ class RunResultBase(abc.ABC):
 
     def to_input_list(self) -> list[TResponseInputItem]:
         """Creates a new input list, merging the original input with all the new items generated."""
-        original_items: list[TResponseInputItem] = ItemHelpers.input_to_new_input_list(self.input)
+        original_items: list[TResponseInputItem] = ItemHelpers.input_to_new_input_list(
+            self.input
+        )
         new_items = [item.to_input_item() for item in self.new_items]
 
         return original_items + new_items
@@ -152,6 +154,18 @@ class RunResultStreaming(RunResultBase):
         """
         return self.current_agent
 
+    def cancel(self) -> None:
+        """Cancels the streaming run, stopping all background tasks and marking the run as
+        complete."""
+        self._cleanup_tasks()  # Cancel all running tasks
+        self.is_complete = True  # Mark the run as complete to stop event streaming
+
+        # Optionally, clear the event queue to prevent processing stale events
+        while not self._event_queue.empty():
+            self._event_queue.get_nowait()
+        while not self._input_guardrail_queue.empty():
+            self._input_guardrail_queue.get_nowait()
+
     async def stream_events(self) -> AsyncIterator[StreamEvent]:
         """Stream deltas for new items as they are generated. We're using the types from the
         OpenAI Responses API, so these are semantic events: each event has a `type` field that
@@ -192,13 +206,17 @@
 
     def _check_errors(self):
         if self.current_turn > self.max_turns:
-            self._stored_exception = MaxTurnsExceeded(f"Max turns ({self.max_turns}) exceeded")
+            self._stored_exception = MaxTurnsExceeded(
+                f"Max turns ({self.max_turns}) exceeded"
+            )
 
         # Fetch all the completed guardrail results from the queue and raise if needed
         while not self._input_guardrail_queue.empty():
             guardrail_result = self._input_guardrail_queue.get_nowait()
             if guardrail_result.output.tripwire_triggered:
-                self._stored_exception = InputGuardrailTripwireTriggered(guardrail_result)
+                self._stored_exception = InputGuardrailTripwireTriggered(
+                    guardrail_result
+                )
 
         # Check the tasks for any exceptions
         if self._run_impl_task and self._run_impl_task.done():
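
The new cancel() lets a consumer stop a streaming run early instead of draining it to completion. A hedged sketch of its use, assuming the SDK's public Agent/Runner entry points:

    import asyncio

    from agents import Agent, Runner

    async def main() -> None:
        agent = Agent(name="assistant", instructions="Write a long story.")
        result = Runner.run_streamed(agent, "Tell me a story")

        count = 0
        async for _event in result.stream_events():
            count += 1
            if count >= 10:
                result.cancel()  # stops background tasks and drains the event queues
                break

    asyncio.run(main())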
openai_agents-0.0.12.dist-info/METADATA → openai_agents-0.0.13.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.0.12
+Version: 0.0.13
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://github.com/openai/openai-agents-python
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -20,7 +20,7 @@ Classifier: Typing :: Typed
 Requires-Python: >=3.9
 Requires-Dist: griffe<2,>=1.5.6
 Requires-Dist: mcp<2,>=1.6.0; python_version >= '3.10'
-Requires-Dist: openai>=1.66.5
+Requires-Dist: openai>=1.76.0
 Requires-Dist: pydantic<3,>=2.10
 Requires-Dist: requests<3,>=2.0
 Requires-Dist: types-requests<3,>=2.0
openai_agents-0.0.12.dist-info/RECORD → openai_agents-0.0.13.dist-info/RECORD RENAMED
@@ -12,9 +12,9 @@ agents/handoffs.py,sha256=wRg-HBGKBZev88mOg_mfv6CR8T2kewZM8eX3tb71l1g,9043
 agents/items.py,sha256=6Xnf6a2tIgM8Pz3T2Xr6J8wgok8fI-KhyKW1XdfHBJU,8306
 agents/lifecycle.py,sha256=wYFG6PLSKQ7bICKVbB8oGtdoJNINGq9obh2RSKlAkDE,2938
 agents/logger.py,sha256=p_ef7vWKpBev5FFybPJjhrCCQizK08Yy1A2EDO1SNNg,60
-agents/model_settings.py,sha256=9_YyNOI2MDHxk5MnhTOZhbaE4bm4aJSowqxlP_awDZk,2724
+agents/model_settings.py,sha256=7s9YjfHBVz1f1a-V3dd-8eMe-IAgfDXhQgChI27Kz00,3326
 agents/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-agents/result.py,sha256=z2gU5dHF1BMcTc_c_HWIZ9Kk0pa4jK_2ivVV0nBZwE4,8607
+agents/result.py,sha256=WjRTfmM9UOEqcliUX_oUfpHjfzp9b7GuS34SgCngr9U,9258
 agents/run.py,sha256=Fm1dHG9cuUuv17B7AXEEeskal5uxrkRy0CJrPbYE2k4,40121
 agents/run_context.py,sha256=vuSUQM8O4CLensQY27-22fOqECnw7yvwL9U3WO8b_bk,851
 agents/stream_events.py,sha256=ULgBEcL_H4vklZoxhpY2yomeoxVF0UiXvswsFsjFv4s,1547
@@ -27,22 +27,22 @@ agents/extensions/handoff_filters.py,sha256=2cXxu1JROez96CpTiGuT9PIuaIrIE8ksP01f
 agents/extensions/handoff_prompt.py,sha256=oGWN0uNh3Z1L7E-Ev2up8W084fFrDNOsLDy7P6bcmic,1006
 agents/extensions/visualization.py,sha256=AQFC7kQlZqTI6QVkyDHrF_DodCytrrhcLg35nfRd_JA,4256
 agents/extensions/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-agents/extensions/models/litellm_model.py,sha256=RG3AWH2BHKeDTX0MDQAtzT5IRFFEFEBpC6PPC_3ai_I,13616
+agents/extensions/models/litellm_model.py,sha256=sqlUA4uS6jJExf05oY-ZdhlX_97ZX47Eg5zHmR0UkyI,13631
 agents/extensions/models/litellm_provider.py,sha256=wTm00Anq8YoNb9AnyT0JOunDG-HCDm_98ORNy7aNJdw,928
 agents/mcp/__init__.py,sha256=x-4ZFiXNyJPn9Nbwcai6neKgonyRJ7by67HxnOLPgrw,359
-agents/mcp/server.py,sha256=3r7D2x8RYj6sMeizecXmyRmyuSx8rutlQ02Xb0Hbl_w,11290
+agents/mcp/server.py,sha256=vB3K2GREyrQv2ikRz2m4EtEY0nynQ5X-7sLbOH3s29E,12163
 agents/mcp/util.py,sha256=dIEdYDMc7Sjp-DFQnvoc4VWU-B7Heyx0I41bcW7RlEg,5232
 agents/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agents/models/_openai_shared.py,sha256=4Ngwo2Fv2RXY61Pqck1cYPkSln2tDnb8Ai-ao4QG-iE,836
 agents/models/chatcmpl_converter.py,sha256=C4EOZQDffCRDDRGT8XEobOHJoX7ONaiETDz2MeTviHg,18114
 agents/models/chatcmpl_helpers.py,sha256=eIWySobaH7I0AQijAz5i-_rtsXrSvmEHD567s_8Zw1o,1318
-agents/models/chatcmpl_stream_handler.py,sha256=cwaaaijQVwegYcB1DNlShslkkMx5NV5lWsEiynX_3y0,12219
+agents/models/chatcmpl_stream_handler.py,sha256=VjskdeGnepn0iJbxsqNZrexcuAYAV1zd5hwt0lU8E7I,12452
 agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
 agents/models/interface.py,sha256=eEpiIBn9MxsmXUK1HPpn3c7TYPduBYC7tsWnDHSYJHo,3553
 agents/models/multi_provider.py,sha256=aiDbls5G4YomPfN6qH1pGlj41WS5jlDp2T82zm6qcnM,5578
-agents/models/openai_chatcompletions.py,sha256=20SKh9G5nwkzKE3R_up56Pbp9JtV7hX1GZVef_J2scs,10397
+agents/models/openai_chatcompletions.py,sha256=QiUOdd4gQ7f-uslm4SqRlv9bt3T1oFL87EnqVYlWw4A,10390
 agents/models/openai_provider.py,sha256=NMxTNaoTa329GrA7jj51LC02pb_e2eFh-PCvWADJrkY,3478
-agents/models/openai_responses.py,sha256=Qr0n1hMAc5f1SHdDJE6PargQScqPPiGlSpuyy_F439E,14287
+agents/models/openai_responses.py,sha256=-hwXW7gXYOs4EbVrFhsil-tWb63gtLj_vaGQ9HXf6nE,14331
 agents/tracing/__init__.py,sha256=-hJeEiNvgyQdEXpFTrr_qu_XYREvIrF5KyePDtovSak,2804
 agents/tracing/create.py,sha256=kkMf2pp5Te20YkiSvf3Xj3J9qMibQCjEAxZs1Lr_kTE,18124
 agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
@@ -76,7 +76,7 @@ agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
 agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
 agents/voice/models/openai_stt.py,sha256=rRsldkvkPhH4T0waX1dhccEqIwmPYh-teK_LRvBgiNI,16882
 agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
-openai_agents-0.0.12.dist-info/METADATA,sha256=Q0zSGyyTHMpG_cLbPvwinGNBFYUMlpfojTxNuB_YkfY,8157
-openai_agents-0.0.12.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-openai_agents-0.0.12.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
-openai_agents-0.0.12.dist-info/RECORD,,
+openai_agents-0.0.13.dist-info/METADATA,sha256=0b4sOGaK4JbQC-SId-Xn4WB_AmCRFCUHxymAJEdhYMU,8157
+openai_agents-0.0.13.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+openai_agents-0.0.13.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
+openai_agents-0.0.13.dist-info/RECORD,,