openai-agents 0.0.11__py3-none-any.whl → 0.0.13__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.

agents/mcp/server.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
 import abc
 import asyncio
 from contextlib import AbstractAsyncContextManager, AsyncExitStack
+from datetime import timedelta
 from pathlib import Path
 from typing import Any, Literal
 
@@ -54,7 +55,7 @@ class MCPServer(abc.ABC):
 class _MCPServerWithClientSession(MCPServer, abc.ABC):
     """Base class for MCP servers that use a `ClientSession` to communicate with the server."""
 
-    def __init__(self, cache_tools_list: bool):
+    def __init__(self, cache_tools_list: bool, client_session_timeout_seconds: float | None):
         """
         Args:
             cache_tools_list: Whether to cache the tools list. If `True`, the tools list will be
@@ -63,12 +64,16 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
                 by calling `invalidate_tools_cache()`. You should set this to `True` if you know the
                 server will not change its tools list, because it can drastically improve latency
                 (by avoiding a round-trip to the server every time).
+
+            client_session_timeout_seconds: the read timeout passed to the MCP ClientSession.
         """
         self.session: ClientSession | None = None
         self.exit_stack: AsyncExitStack = AsyncExitStack()
         self._cleanup_lock: asyncio.Lock = asyncio.Lock()
         self.cache_tools_list = cache_tools_list
 
+        self.client_session_timeout_seconds = client_session_timeout_seconds
+
         # The cache is always dirty at startup, so that we fetch tools at least once
         self._cache_dirty = True
         self._tools_list: list[MCPTool] | None = None
@@ -101,7 +106,15 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
         try:
             transport = await self.exit_stack.enter_async_context(self.create_streams())
             read, write = transport
-            session = await self.exit_stack.enter_async_context(ClientSession(read, write))
+            session = await self.exit_stack.enter_async_context(
+                ClientSession(
+                    read,
+                    write,
+                    timedelta(seconds=self.client_session_timeout_seconds)
+                    if self.client_session_timeout_seconds
+                    else None,
+                )
+            )
             await session.initialize()
             self.session = session
         except Exception as e:
@@ -137,9 +150,10 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
         async with self._cleanup_lock:
             try:
                 await self.exit_stack.aclose()
-                self.session = None
             except Exception as e:
                 logger.error(f"Error cleaning up server: {e}")
+            finally:
+                self.session = None
 
 
 class MCPServerStdioParams(TypedDict):
@@ -182,6 +196,7 @@ class MCPServerStdio(_MCPServerWithClientSession):
         params: MCPServerStdioParams,
         cache_tools_list: bool = False,
         name: str | None = None,
+        client_session_timeout_seconds: float | None = 5,
    ):
         """Create a new MCP server based on the stdio transport.
 
@@ -198,8 +213,9 @@ class MCPServerStdio(_MCPServerWithClientSession):
                 improve latency (by avoiding a round-trip to the server every time).
             name: A readable name for the server. If not provided, we'll create one from the
                 command.
+            client_session_timeout_seconds: the read timeout passed to the MCP ClientSession.
         """
-        super().__init__(cache_tools_list)
+        super().__init__(cache_tools_list, client_session_timeout_seconds)
 
         self.params = StdioServerParameters(
             command=params["command"],
@@ -256,6 +272,7 @@ class MCPServerSse(_MCPServerWithClientSession):
         params: MCPServerSseParams,
         cache_tools_list: bool = False,
         name: str | None = None,
+        client_session_timeout_seconds: float | None = 5,
    ):
         """Create a new MCP server based on the HTTP with SSE transport.
 
@@ -273,8 +290,10 @@
 
             name: A readable name for the server. If not provided, we'll create one from the
                 URL.
+
+            client_session_timeout_seconds: the read timeout passed to the MCP ClientSession.
         """
-        super().__init__(cache_tools_list)
+        super().__init__(cache_tools_list, client_session_timeout_seconds)
 
         self.params = params
         self._name = name or f"sse: {self.params['url']}"
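
Taken together, the server.py changes give both transports a configurable read timeout (default 5 seconds) and make cleanup clear the session handle even when closing the exit stack fails. A minimal sketch of the new parameter in use; the server command and script name are placeholders for any stdio MCP server:

import asyncio

from agents.mcp import MCPServerStdio


async def main() -> None:
    server = MCPServerStdio(
        params={"command": "python", "args": ["my_mcp_server.py"]},  # hypothetical server
        # New in this release: forwarded to ClientSession as a timedelta read
        # timeout. Defaults to 5 seconds; pass None for the old unbounded reads.
        client_session_timeout_seconds=10,
    )
    try:
        await server.connect()
        tools = await server.list_tools()
        print([tool.name for tool in tools])
    finally:
        await server.cleanup()


asyncio.run(main())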
agents/model_settings.py CHANGED
@@ -1,10 +1,12 @@
 from __future__ import annotations
 
+import dataclasses
 from dataclasses import dataclass, fields, replace
-from typing import Literal
+from typing import Any, Literal
 
-from openai._types import Body, Query
+from openai._types import Body, Headers, Query
 from openai.types.shared import Reasoning
+from pydantic import BaseModel
 
 
 @dataclass
@@ -67,6 +69,10 @@ class ModelSettings:
     """Additional body fields to provide with the request.
     Defaults to None if not provided."""
 
+    extra_headers: Headers | None = None
+    """Additional headers to provide with the request.
+    Defaults to None if not provided."""
+
     def resolve(self, override: ModelSettings | None) -> ModelSettings:
         """Produce a new ModelSettings by overlaying any non-None values from the
         override on top of this instance."""
@@ -79,3 +85,16 @@
             if getattr(override, field.name) is not None
         }
         return replace(self, **changes)
+
+    def to_json_dict(self) -> dict[str, Any]:
+        dataclass_dict = dataclasses.asdict(self)
+
+        json_dict: dict[str, Any] = {}
+
+        for field_name, value in dataclass_dict.items():
+            if isinstance(value, BaseModel):
+                json_dict[field_name] = value.model_dump(mode="json")
+            else:
+                json_dict[field_name] = value
+
+        return json_dict
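
ModelSettings thus gains a pass-through for custom request headers and a JSON-safe serializer. A short sketch of both additions; the header name is invented for illustration:

from openai.types.shared import Reasoning

from agents.model_settings import ModelSettings

settings = ModelSettings(
    temperature=0.3,
    reasoning=Reasoning(effort="low"),
    extra_headers={"X-Request-Source": "my-app"},  # illustrative header
)

# Plain fields pass through unchanged; pydantic models (like Reasoning) are
# converted with model_dump(mode="json"), so the result is json.dumps-able.
as_json = settings.to_json_dict()
print(as_json["extra_headers"], as_json["reasoning"])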
agents/models/chatcmpl_converter.py ADDED
@@ -0,0 +1,466 @@
+from __future__ import annotations
+
+import json
+from collections.abc import Iterable
+from typing import Any, Literal, cast
+
+from openai import NOT_GIVEN, NotGiven
+from openai.types.chat import (
+    ChatCompletionAssistantMessageParam,
+    ChatCompletionContentPartImageParam,
+    ChatCompletionContentPartParam,
+    ChatCompletionContentPartTextParam,
+    ChatCompletionDeveloperMessageParam,
+    ChatCompletionMessage,
+    ChatCompletionMessageParam,
+    ChatCompletionMessageToolCallParam,
+    ChatCompletionSystemMessageParam,
+    ChatCompletionToolChoiceOptionParam,
+    ChatCompletionToolMessageParam,
+    ChatCompletionUserMessageParam,
+)
+from openai.types.chat.chat_completion_tool_param import ChatCompletionToolParam
+from openai.types.chat.completion_create_params import ResponseFormat
+from openai.types.responses import (
+    EasyInputMessageParam,
+    ResponseFileSearchToolCallParam,
+    ResponseFunctionToolCall,
+    ResponseFunctionToolCallParam,
+    ResponseInputContentParam,
+    ResponseInputImageParam,
+    ResponseInputTextParam,
+    ResponseOutputMessage,
+    ResponseOutputMessageParam,
+    ResponseOutputRefusal,
+    ResponseOutputText,
+)
+from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message
+
+from ..agent_output import AgentOutputSchemaBase
+from ..exceptions import AgentsException, UserError
+from ..handoffs import Handoff
+from ..items import TResponseInputItem, TResponseOutputItem
+from ..tool import FunctionTool, Tool
+from .fake_id import FAKE_RESPONSES_ID
+
+
+class Converter:
+    @classmethod
+    def convert_tool_choice(
+        cls, tool_choice: Literal["auto", "required", "none"] | str | None
+    ) -> ChatCompletionToolChoiceOptionParam | NotGiven:
+        if tool_choice is None:
+            return NOT_GIVEN
+        elif tool_choice == "auto":
+            return "auto"
+        elif tool_choice == "required":
+            return "required"
+        elif tool_choice == "none":
+            return "none"
+        else:
+            return {
+                "type": "function",
+                "function": {
+                    "name": tool_choice,
+                },
+            }
+
+    @classmethod
+    def convert_response_format(
+        cls, final_output_schema: AgentOutputSchemaBase | None
+    ) -> ResponseFormat | NotGiven:
+        if not final_output_schema or final_output_schema.is_plain_text():
+            return NOT_GIVEN
+
+        return {
+            "type": "json_schema",
+            "json_schema": {
+                "name": "final_output",
+                "strict": final_output_schema.is_strict_json_schema(),
+                "schema": final_output_schema.json_schema(),
+            },
+        }
+
+    @classmethod
+    def message_to_output_items(cls, message: ChatCompletionMessage) -> list[TResponseOutputItem]:
+        items: list[TResponseOutputItem] = []
+
+        message_item = ResponseOutputMessage(
+            id=FAKE_RESPONSES_ID,
+            content=[],
+            role="assistant",
+            type="message",
+            status="completed",
+        )
+        if message.content:
+            message_item.content.append(
+                ResponseOutputText(text=message.content, type="output_text", annotations=[])
+            )
+        if message.refusal:
+            message_item.content.append(
+                ResponseOutputRefusal(refusal=message.refusal, type="refusal")
+            )
+        if message.audio:
+            raise AgentsException("Audio is not currently supported")
+
+        if message_item.content:
+            items.append(message_item)
+
+        if message.tool_calls:
+            for tool_call in message.tool_calls:
+                items.append(
+                    ResponseFunctionToolCall(
+                        id=FAKE_RESPONSES_ID,
+                        call_id=tool_call.id,
+                        arguments=tool_call.function.arguments,
+                        name=tool_call.function.name,
+                        type="function_call",
+                    )
+                )
+
+        return items
+
+    @classmethod
+    def maybe_easy_input_message(cls, item: Any) -> EasyInputMessageParam | None:
+        if not isinstance(item, dict):
+            return None
+
+        keys = item.keys()
+        # EasyInputMessageParam only has these two keys
+        if keys != {"content", "role"}:
+            return None
+
+        role = item.get("role", None)
+        if role not in ("user", "assistant", "system", "developer"):
+            return None
+
+        if "content" not in item:
+            return None
+
+        return cast(EasyInputMessageParam, item)
+
+    @classmethod
+    def maybe_input_message(cls, item: Any) -> Message | None:
+        if (
+            isinstance(item, dict)
+            and item.get("type") == "message"
+            and item.get("role")
+            in (
+                "user",
+                "system",
+                "developer",
+            )
+        ):
+            return cast(Message, item)
+
+        return None
+
+    @classmethod
+    def maybe_file_search_call(cls, item: Any) -> ResponseFileSearchToolCallParam | None:
+        if isinstance(item, dict) and item.get("type") == "file_search_call":
+            return cast(ResponseFileSearchToolCallParam, item)
+        return None
+
+    @classmethod
+    def maybe_function_tool_call(cls, item: Any) -> ResponseFunctionToolCallParam | None:
+        if isinstance(item, dict) and item.get("type") == "function_call":
+            return cast(ResponseFunctionToolCallParam, item)
+        return None
+
+    @classmethod
+    def maybe_function_tool_call_output(
+        cls,
+        item: Any,
+    ) -> FunctionCallOutput | None:
+        if isinstance(item, dict) and item.get("type") == "function_call_output":
+            return cast(FunctionCallOutput, item)
+        return None
+
+    @classmethod
+    def maybe_item_reference(cls, item: Any) -> ItemReference | None:
+        if isinstance(item, dict) and item.get("type") == "item_reference":
+            return cast(ItemReference, item)
+        return None
+
+    @classmethod
+    def maybe_response_output_message(cls, item: Any) -> ResponseOutputMessageParam | None:
+        # ResponseOutputMessage is only used for messages with role assistant
+        if (
+            isinstance(item, dict)
+            and item.get("type") == "message"
+            and item.get("role") == "assistant"
+        ):
+            return cast(ResponseOutputMessageParam, item)
+        return None
+
+    @classmethod
+    def extract_text_content(
+        cls, content: str | Iterable[ResponseInputContentParam]
+    ) -> str | list[ChatCompletionContentPartTextParam]:
+        all_content = cls.extract_all_content(content)
+        if isinstance(all_content, str):
+            return all_content
+        out: list[ChatCompletionContentPartTextParam] = []
+        for c in all_content:
+            if c.get("type") == "text":
+                out.append(cast(ChatCompletionContentPartTextParam, c))
+        return out
+
+    @classmethod
+    def extract_all_content(
+        cls, content: str | Iterable[ResponseInputContentParam]
+    ) -> str | list[ChatCompletionContentPartParam]:
+        if isinstance(content, str):
+            return content
+        out: list[ChatCompletionContentPartParam] = []
+
+        for c in content:
+            if isinstance(c, dict) and c.get("type") == "input_text":
+                casted_text_param = cast(ResponseInputTextParam, c)
+                out.append(
+                    ChatCompletionContentPartTextParam(
+                        type="text",
+                        text=casted_text_param["text"],
+                    )
+                )
+            elif isinstance(c, dict) and c.get("type") == "input_image":
+                casted_image_param = cast(ResponseInputImageParam, c)
+                if "image_url" not in casted_image_param or not casted_image_param["image_url"]:
+                    raise UserError(
+                        f"Only image URLs are supported for input_image {casted_image_param}"
+                    )
+                out.append(
+                    ChatCompletionContentPartImageParam(
+                        type="image_url",
+                        image_url={
+                            "url": casted_image_param["image_url"],
+                            "detail": casted_image_param["detail"],
+                        },
+                    )
+                )
+            elif isinstance(c, dict) and c.get("type") == "input_file":
+                raise UserError(f"File uploads are not supported for chat completions {c}")
+            else:
+                raise UserError(f"Unknown content: {c}")
+        return out
+
+    @classmethod
+    def items_to_messages(
+        cls,
+        items: str | Iterable[TResponseInputItem],
+    ) -> list[ChatCompletionMessageParam]:
+        """
+        Convert a sequence of 'Item' objects into a list of ChatCompletionMessageParam.
+
+        Rules:
+        - EasyInputMessage or InputMessage (role=user) => ChatCompletionUserMessageParam
+        - EasyInputMessage or InputMessage (role=system) => ChatCompletionSystemMessageParam
+        - EasyInputMessage or InputMessage (role=developer) => ChatCompletionDeveloperMessageParam
+        - InputMessage (role=assistant) => Start or flush a ChatCompletionAssistantMessageParam
+        - response_output_message => Also produces/flushes a ChatCompletionAssistantMessageParam
+        - tool calls get attached to the *current* assistant message, or create one if none.
+        - tool outputs => ChatCompletionToolMessageParam
+        """
+
+        if isinstance(items, str):
+            return [
+                ChatCompletionUserMessageParam(
+                    role="user",
+                    content=items,
+                )
+            ]
+
+        result: list[ChatCompletionMessageParam] = []
+        current_assistant_msg: ChatCompletionAssistantMessageParam | None = None
+
+        def flush_assistant_message() -> None:
+            nonlocal current_assistant_msg
+            if current_assistant_msg is not None:
+                # The API doesn't support empty arrays for tool_calls
+                if not current_assistant_msg.get("tool_calls"):
+                    del current_assistant_msg["tool_calls"]
+                result.append(current_assistant_msg)
+                current_assistant_msg = None
+
+        def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
+            nonlocal current_assistant_msg
+            if current_assistant_msg is None:
+                current_assistant_msg = ChatCompletionAssistantMessageParam(role="assistant")
+                current_assistant_msg["tool_calls"] = []
+            return current_assistant_msg
+
+        for item in items:
+            # 1) Check easy input message
+            if easy_msg := cls.maybe_easy_input_message(item):
+                role = easy_msg["role"]
+                content = easy_msg["content"]
+
+                if role == "user":
+                    flush_assistant_message()
+                    msg_user: ChatCompletionUserMessageParam = {
+                        "role": "user",
+                        "content": cls.extract_all_content(content),
+                    }
+                    result.append(msg_user)
+                elif role == "system":
+                    flush_assistant_message()
+                    msg_system: ChatCompletionSystemMessageParam = {
+                        "role": "system",
+                        "content": cls.extract_text_content(content),
+                    }
+                    result.append(msg_system)
+                elif role == "developer":
+                    flush_assistant_message()
+                    msg_developer: ChatCompletionDeveloperMessageParam = {
+                        "role": "developer",
+                        "content": cls.extract_text_content(content),
+                    }
+                    result.append(msg_developer)
+                elif role == "assistant":
+                    flush_assistant_message()
+                    msg_assistant: ChatCompletionAssistantMessageParam = {
+                        "role": "assistant",
+                        "content": cls.extract_text_content(content),
+                    }
+                    result.append(msg_assistant)
+                else:
+                    raise UserError(f"Unexpected role in easy_input_message: {role}")
+
+            # 2) Check input message
+            elif in_msg := cls.maybe_input_message(item):
+                role = in_msg["role"]
+                content = in_msg["content"]
+                flush_assistant_message()
+
+                if role == "user":
+                    msg_user = {
+                        "role": "user",
+                        "content": cls.extract_all_content(content),
+                    }
+                    result.append(msg_user)
+                elif role == "system":
+                    msg_system = {
+                        "role": "system",
+                        "content": cls.extract_text_content(content),
+                    }
+                    result.append(msg_system)
+                elif role == "developer":
+                    msg_developer = {
+                        "role": "developer",
+                        "content": cls.extract_text_content(content),
+                    }
+                    result.append(msg_developer)
+                else:
+                    raise UserError(f"Unexpected role in input_message: {role}")
+
+            # 3) response output message => assistant
+            elif resp_msg := cls.maybe_response_output_message(item):
+                flush_assistant_message()
+                new_asst = ChatCompletionAssistantMessageParam(role="assistant")
+                contents = resp_msg["content"]
+
+                text_segments = []
+                for c in contents:
+                    if c["type"] == "output_text":
+                        text_segments.append(c["text"])
+                    elif c["type"] == "refusal":
+                        new_asst["refusal"] = c["refusal"]
+                    elif c["type"] == "output_audio":
+                        # Can't handle this, b/c chat completions expects an ID which we dont have
+                        raise UserError(
+                            f"Only audio IDs are supported for chat completions, but got: {c}"
+                        )
+                    else:
+                        raise UserError(f"Unknown content type in ResponseOutputMessage: {c}")
+
+                if text_segments:
+                    combined = "\n".join(text_segments)
+                    new_asst["content"] = combined
+
+                new_asst["tool_calls"] = []
+                current_assistant_msg = new_asst
+
+            # 4) function/file-search calls => attach to assistant
+            elif file_search := cls.maybe_file_search_call(item):
+                asst = ensure_assistant_message()
+                tool_calls = list(asst.get("tool_calls", []))
+                new_tool_call = ChatCompletionMessageToolCallParam(
+                    id=file_search["id"],
+                    type="function",
+                    function={
+                        "name": "file_search_call",
+                        "arguments": json.dumps(
+                            {
+                                "queries": file_search.get("queries", []),
+                                "status": file_search.get("status"),
+                            }
+                        ),
+                    },
+                )
+                tool_calls.append(new_tool_call)
+                asst["tool_calls"] = tool_calls
+
+            elif func_call := cls.maybe_function_tool_call(item):
+                asst = ensure_assistant_message()
+                tool_calls = list(asst.get("tool_calls", []))
+                arguments = func_call["arguments"] if func_call["arguments"] else "{}"
+                new_tool_call = ChatCompletionMessageToolCallParam(
+                    id=func_call["call_id"],
+                    type="function",
+                    function={
+                        "name": func_call["name"],
+                        "arguments": arguments,
+                    },
+                )
+                tool_calls.append(new_tool_call)
+                asst["tool_calls"] = tool_calls
+            # 5) function call output => tool message
+            elif func_output := cls.maybe_function_tool_call_output(item):
+                flush_assistant_message()
+                msg: ChatCompletionToolMessageParam = {
+                    "role": "tool",
+                    "tool_call_id": func_output["call_id"],
+                    "content": func_output["output"],
+                }
+                result.append(msg)
+
+            # 6) item reference => handle or raise
+            elif item_ref := cls.maybe_item_reference(item):
+                raise UserError(
+                    f"Encountered an item_reference, which is not supported: {item_ref}"
+                )
+
+            # 7) If we haven't recognized it => fail or ignore
+            else:
+                raise UserError(f"Unhandled item type or structure: {item}")
+
+        flush_assistant_message()
+        return result
+
+    @classmethod
+    def tool_to_openai(cls, tool: Tool) -> ChatCompletionToolParam:
+        if isinstance(tool, FunctionTool):
+            return {
+                "type": "function",
+                "function": {
+                    "name": tool.name,
+                    "description": tool.description or "",
+                    "parameters": tool.params_json_schema,
+                },
+            }
+
+        raise UserError(
+            f"Hosted tools are not supported with the ChatCompletions API. Got tool type: "
+            f"{type(tool)}, tool: {tool}"
+        )
+
+    @classmethod
+    def convert_handoff_tool(cls, handoff: Handoff[Any]) -> ChatCompletionToolParam:
+        return {
+            "type": "function",
+            "function": {
+                "name": handoff.tool_name,
+                "description": handoff.tool_description,
+                "parameters": handoff.input_json_schema,
+            },
+        }
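
This new module centralizes the translation between Responses-style input items and Chat Completions messages. A sketch of the item-to-message conversion, assuming the module path given above; the tool name and call IDs are invented:

from agents.models.chatcmpl_converter import Converter

items = [
    {"role": "system", "content": "You are terse."},
    {"role": "user", "content": "hi"},
    {
        "type": "function_call",
        "call_id": "call_1",
        "name": "get_time",  # hypothetical tool
        "arguments": "{}",
    },
    {"type": "function_call_output", "call_id": "call_1", "output": "12:00"},
]

# Yields a system message, a user message, an assistant message carrying the
# tool call, and a tool message with tool_call_id="call_1".
messages = Converter.items_to_messages(items)

Note the buffering design: tool calls attach to the current assistant message (creating one if needed), and any non-tool-call item flushes that buffer so message ordering is preserved.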
agents/models/chatcmpl_helpers.py ADDED
@@ -0,0 +1,37 @@
+from __future__ import annotations
+
+from openai import AsyncOpenAI
+
+from ..model_settings import ModelSettings
+from ..version import __version__
+
+_USER_AGENT = f"Agents/Python {__version__}"
+HEADERS = {"User-Agent": _USER_AGENT}
+
+
+class ChatCmplHelpers:
+    @classmethod
+    def is_openai(cls, client: AsyncOpenAI):
+        return str(client.base_url).startswith("https://api.openai.com")
+
+    @classmethod
+    def get_store_param(cls, client: AsyncOpenAI, model_settings: ModelSettings) -> bool | None:
+        # Match the behavior of Responses where store is True when not given
+        default_store = True if cls.is_openai(client) else None
+        return model_settings.store if model_settings.store is not None else default_store
+
+    @classmethod
+    def get_stream_options_param(
+        cls, client: AsyncOpenAI, model_settings: ModelSettings, stream: bool
+    ) -> dict[str, bool] | None:
+        if not stream:
+            return None
+
+        default_include_usage = True if cls.is_openai(client) else None
+        include_usage = (
+            model_settings.include_usage
+            if model_settings.include_usage is not None
+            else default_include_usage
+        )
+        stream_options = {"include_usage": include_usage} if include_usage is not None else None
+        return stream_options
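
ChatCmplHelpers keys its defaults off whether the client points at api.openai.com: there, store defaults to True and streamed responses request usage reporting, while other base URLs get no opinion unless ModelSettings says otherwise. A quick sketch, assuming the module path above; the API keys and proxy URL are placeholders:

from openai import AsyncOpenAI

from agents.model_settings import ModelSettings
from agents.models.chatcmpl_helpers import ChatCmplHelpers

openai_client = AsyncOpenAI(api_key="sk-placeholder")
proxy_client = AsyncOpenAI(api_key="x", base_url="http://localhost:8000/v1")

settings = ModelSettings()  # store / include_usage left unset

assert ChatCmplHelpers.get_store_param(openai_client, settings) is True
assert ChatCmplHelpers.get_store_param(proxy_client, settings) is None
assert ChatCmplHelpers.get_stream_options_param(
    openai_client, settings, stream=True
) == {"include_usage": True}
assert ChatCmplHelpers.get_stream_options_param(proxy_client, settings, stream=True) is None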