openai-agents 0.0.10__py3-none-any.whl → 0.0.12__py3-none-any.whl
This diff shows the changes between publicly released package versions, as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
This version of openai-agents might be problematic.
- agents/__init__.py +2 -1
- agents/_run_impl.py +3 -3
- agents/agent.py +9 -2
- agents/agent_output.py +58 -8
- agents/extensions/models/__init__.py +0 -0
- agents/extensions/models/litellm_model.py +382 -0
- agents/extensions/models/litellm_provider.py +21 -0
- agents/extensions/visualization.py +1 -1
- agents/mcp/server.py +2 -1
- agents/models/chatcmpl_converter.py +466 -0
- agents/models/chatcmpl_helpers.py +37 -0
- agents/models/chatcmpl_stream_handler.py +290 -0
- agents/models/interface.py +3 -3
- agents/models/multi_provider.py +144 -0
- agents/models/openai_chatcompletions.py +35 -773
- agents/models/openai_responses.py +8 -8
- agents/result.py +3 -6
- agents/run.py +16 -13
- agents/tracing/processors.py +6 -0
- agents/tracing/span_data.py +1 -1
- {openai_agents-0.0.10.dist-info → openai_agents-0.0.12.dist-info}/METADATA +4 -4
- {openai_agents-0.0.10.dist-info → openai_agents-0.0.12.dist-info}/RECORD +24 -17
- {openai_agents-0.0.10.dist-info → openai_agents-0.0.12.dist-info}/WHEEL +0 -0
- {openai_agents-0.0.10.dist-info → openai_agents-0.0.12.dist-info}/licenses/LICENSE +0 -0
agents/models/chatcmpl_converter.py (new file)
@@ -0,0 +1,466 @@

```python
from __future__ import annotations

import json
from collections.abc import Iterable
from typing import Any, Literal, cast

from openai import NOT_GIVEN, NotGiven
from openai.types.chat import (
    ChatCompletionAssistantMessageParam,
    ChatCompletionContentPartImageParam,
    ChatCompletionContentPartParam,
    ChatCompletionContentPartTextParam,
    ChatCompletionDeveloperMessageParam,
    ChatCompletionMessage,
    ChatCompletionMessageParam,
    ChatCompletionMessageToolCallParam,
    ChatCompletionSystemMessageParam,
    ChatCompletionToolChoiceOptionParam,
    ChatCompletionToolMessageParam,
    ChatCompletionUserMessageParam,
)
from openai.types.chat.chat_completion_tool_param import ChatCompletionToolParam
from openai.types.chat.completion_create_params import ResponseFormat
from openai.types.responses import (
    EasyInputMessageParam,
    ResponseFileSearchToolCallParam,
    ResponseFunctionToolCall,
    ResponseFunctionToolCallParam,
    ResponseInputContentParam,
    ResponseInputImageParam,
    ResponseInputTextParam,
    ResponseOutputMessage,
    ResponseOutputMessageParam,
    ResponseOutputRefusal,
    ResponseOutputText,
)
from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message

from ..agent_output import AgentOutputSchemaBase
from ..exceptions import AgentsException, UserError
from ..handoffs import Handoff
from ..items import TResponseInputItem, TResponseOutputItem
from ..tool import FunctionTool, Tool
from .fake_id import FAKE_RESPONSES_ID


class Converter:
    @classmethod
    def convert_tool_choice(
        cls, tool_choice: Literal["auto", "required", "none"] | str | None
    ) -> ChatCompletionToolChoiceOptionParam | NotGiven:
        if tool_choice is None:
            return NOT_GIVEN
        elif tool_choice == "auto":
            return "auto"
        elif tool_choice == "required":
            return "required"
        elif tool_choice == "none":
            return "none"
        else:
            return {
                "type": "function",
                "function": {
                    "name": tool_choice,
                },
            }

    @classmethod
    def convert_response_format(
        cls, final_output_schema: AgentOutputSchemaBase | None
    ) -> ResponseFormat | NotGiven:
        if not final_output_schema or final_output_schema.is_plain_text():
            return NOT_GIVEN

        return {
            "type": "json_schema",
            "json_schema": {
                "name": "final_output",
                "strict": final_output_schema.is_strict_json_schema(),
                "schema": final_output_schema.json_schema(),
            },
        }

    @classmethod
    def message_to_output_items(cls, message: ChatCompletionMessage) -> list[TResponseOutputItem]:
        items: list[TResponseOutputItem] = []

        message_item = ResponseOutputMessage(
            id=FAKE_RESPONSES_ID,
            content=[],
            role="assistant",
            type="message",
            status="completed",
        )
        if message.content:
            message_item.content.append(
                ResponseOutputText(text=message.content, type="output_text", annotations=[])
            )
        if message.refusal:
            message_item.content.append(
                ResponseOutputRefusal(refusal=message.refusal, type="refusal")
            )
        if message.audio:
            raise AgentsException("Audio is not currently supported")

        if message_item.content:
            items.append(message_item)

        if message.tool_calls:
            for tool_call in message.tool_calls:
                items.append(
                    ResponseFunctionToolCall(
                        id=FAKE_RESPONSES_ID,
                        call_id=tool_call.id,
                        arguments=tool_call.function.arguments,
                        name=tool_call.function.name,
                        type="function_call",
                    )
                )

        return items

    @classmethod
    def maybe_easy_input_message(cls, item: Any) -> EasyInputMessageParam | None:
        if not isinstance(item, dict):
            return None

        keys = item.keys()
        # EasyInputMessageParam only has these two keys
        if keys != {"content", "role"}:
            return None

        role = item.get("role", None)
        if role not in ("user", "assistant", "system", "developer"):
            return None

        if "content" not in item:
            return None

        return cast(EasyInputMessageParam, item)

    @classmethod
    def maybe_input_message(cls, item: Any) -> Message | None:
        if (
            isinstance(item, dict)
            and item.get("type") == "message"
            and item.get("role")
            in (
                "user",
                "system",
                "developer",
            )
        ):
            return cast(Message, item)

        return None

    @classmethod
    def maybe_file_search_call(cls, item: Any) -> ResponseFileSearchToolCallParam | None:
        if isinstance(item, dict) and item.get("type") == "file_search_call":
            return cast(ResponseFileSearchToolCallParam, item)
        return None

    @classmethod
    def maybe_function_tool_call(cls, item: Any) -> ResponseFunctionToolCallParam | None:
        if isinstance(item, dict) and item.get("type") == "function_call":
            return cast(ResponseFunctionToolCallParam, item)
        return None

    @classmethod
    def maybe_function_tool_call_output(
        cls,
        item: Any,
    ) -> FunctionCallOutput | None:
        if isinstance(item, dict) and item.get("type") == "function_call_output":
            return cast(FunctionCallOutput, item)
        return None

    @classmethod
    def maybe_item_reference(cls, item: Any) -> ItemReference | None:
        if isinstance(item, dict) and item.get("type") == "item_reference":
            return cast(ItemReference, item)
        return None

    @classmethod
    def maybe_response_output_message(cls, item: Any) -> ResponseOutputMessageParam | None:
        # ResponseOutputMessage is only used for messages with role assistant
        if (
            isinstance(item, dict)
            and item.get("type") == "message"
            and item.get("role") == "assistant"
        ):
            return cast(ResponseOutputMessageParam, item)
        return None

    @classmethod
    def extract_text_content(
        cls, content: str | Iterable[ResponseInputContentParam]
    ) -> str | list[ChatCompletionContentPartTextParam]:
        all_content = cls.extract_all_content(content)
        if isinstance(all_content, str):
            return all_content
        out: list[ChatCompletionContentPartTextParam] = []
        for c in all_content:
            if c.get("type") == "text":
                out.append(cast(ChatCompletionContentPartTextParam, c))
        return out

    @classmethod
    def extract_all_content(
        cls, content: str | Iterable[ResponseInputContentParam]
    ) -> str | list[ChatCompletionContentPartParam]:
        if isinstance(content, str):
            return content
        out: list[ChatCompletionContentPartParam] = []

        for c in content:
            if isinstance(c, dict) and c.get("type") == "input_text":
                casted_text_param = cast(ResponseInputTextParam, c)
                out.append(
                    ChatCompletionContentPartTextParam(
                        type="text",
                        text=casted_text_param["text"],
                    )
                )
            elif isinstance(c, dict) and c.get("type") == "input_image":
                casted_image_param = cast(ResponseInputImageParam, c)
                if "image_url" not in casted_image_param or not casted_image_param["image_url"]:
                    raise UserError(
                        f"Only image URLs are supported for input_image {casted_image_param}"
                    )
                out.append(
                    ChatCompletionContentPartImageParam(
                        type="image_url",
                        image_url={
                            "url": casted_image_param["image_url"],
                            "detail": casted_image_param["detail"],
                        },
                    )
                )
            elif isinstance(c, dict) and c.get("type") == "input_file":
                raise UserError(f"File uploads are not supported for chat completions {c}")
            else:
                raise UserError(f"Unknown content: {c}")
        return out

    @classmethod
    def items_to_messages(
        cls,
        items: str | Iterable[TResponseInputItem],
    ) -> list[ChatCompletionMessageParam]:
        """
        Convert a sequence of 'Item' objects into a list of ChatCompletionMessageParam.

        Rules:
        - EasyInputMessage or InputMessage (role=user) => ChatCompletionUserMessageParam
        - EasyInputMessage or InputMessage (role=system) => ChatCompletionSystemMessageParam
        - EasyInputMessage or InputMessage (role=developer) => ChatCompletionDeveloperMessageParam
        - InputMessage (role=assistant) => Start or flush a ChatCompletionAssistantMessageParam
        - response_output_message => Also produces/flushes a ChatCompletionAssistantMessageParam
        - tool calls get attached to the *current* assistant message, or create one if none.
        - tool outputs => ChatCompletionToolMessageParam
        """

        if isinstance(items, str):
            return [
                ChatCompletionUserMessageParam(
                    role="user",
                    content=items,
                )
            ]

        result: list[ChatCompletionMessageParam] = []
        current_assistant_msg: ChatCompletionAssistantMessageParam | None = None

        def flush_assistant_message() -> None:
            nonlocal current_assistant_msg
            if current_assistant_msg is not None:
                # The API doesn't support empty arrays for tool_calls
                if not current_assistant_msg.get("tool_calls"):
                    del current_assistant_msg["tool_calls"]
                result.append(current_assistant_msg)
                current_assistant_msg = None

        def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
            nonlocal current_assistant_msg
            if current_assistant_msg is None:
                current_assistant_msg = ChatCompletionAssistantMessageParam(role="assistant")
                current_assistant_msg["tool_calls"] = []
            return current_assistant_msg

        for item in items:
            # 1) Check easy input message
            if easy_msg := cls.maybe_easy_input_message(item):
                role = easy_msg["role"]
                content = easy_msg["content"]

                if role == "user":
                    flush_assistant_message()
                    msg_user: ChatCompletionUserMessageParam = {
                        "role": "user",
                        "content": cls.extract_all_content(content),
                    }
                    result.append(msg_user)
                elif role == "system":
                    flush_assistant_message()
                    msg_system: ChatCompletionSystemMessageParam = {
                        "role": "system",
                        "content": cls.extract_text_content(content),
                    }
                    result.append(msg_system)
                elif role == "developer":
                    flush_assistant_message()
                    msg_developer: ChatCompletionDeveloperMessageParam = {
                        "role": "developer",
                        "content": cls.extract_text_content(content),
                    }
                    result.append(msg_developer)
                elif role == "assistant":
                    flush_assistant_message()
                    msg_assistant: ChatCompletionAssistantMessageParam = {
                        "role": "assistant",
                        "content": cls.extract_text_content(content),
                    }
                    result.append(msg_assistant)
                else:
                    raise UserError(f"Unexpected role in easy_input_message: {role}")

            # 2) Check input message
            elif in_msg := cls.maybe_input_message(item):
                role = in_msg["role"]
                content = in_msg["content"]
                flush_assistant_message()

                if role == "user":
                    msg_user = {
                        "role": "user",
                        "content": cls.extract_all_content(content),
                    }
                    result.append(msg_user)
                elif role == "system":
                    msg_system = {
                        "role": "system",
                        "content": cls.extract_text_content(content),
                    }
                    result.append(msg_system)
                elif role == "developer":
                    msg_developer = {
                        "role": "developer",
                        "content": cls.extract_text_content(content),
                    }
                    result.append(msg_developer)
                else:
                    raise UserError(f"Unexpected role in input_message: {role}")

            # 3) response output message => assistant
            elif resp_msg := cls.maybe_response_output_message(item):
                flush_assistant_message()
                new_asst = ChatCompletionAssistantMessageParam(role="assistant")
                contents = resp_msg["content"]

                text_segments = []
                for c in contents:
                    if c["type"] == "output_text":
                        text_segments.append(c["text"])
                    elif c["type"] == "refusal":
                        new_asst["refusal"] = c["refusal"]
                    elif c["type"] == "output_audio":
                        # Can't handle this, b/c chat completions expects an ID which we dont have
                        raise UserError(
                            f"Only audio IDs are supported for chat completions, but got: {c}"
                        )
                    else:
                        raise UserError(f"Unknown content type in ResponseOutputMessage: {c}")

                if text_segments:
                    combined = "\n".join(text_segments)
                    new_asst["content"] = combined

                new_asst["tool_calls"] = []
                current_assistant_msg = new_asst

            # 4) function/file-search calls => attach to assistant
            elif file_search := cls.maybe_file_search_call(item):
                asst = ensure_assistant_message()
                tool_calls = list(asst.get("tool_calls", []))
                new_tool_call = ChatCompletionMessageToolCallParam(
                    id=file_search["id"],
                    type="function",
                    function={
                        "name": "file_search_call",
                        "arguments": json.dumps(
                            {
                                "queries": file_search.get("queries", []),
                                "status": file_search.get("status"),
                            }
                        ),
                    },
                )
                tool_calls.append(new_tool_call)
                asst["tool_calls"] = tool_calls

            elif func_call := cls.maybe_function_tool_call(item):
                asst = ensure_assistant_message()
                tool_calls = list(asst.get("tool_calls", []))
                arguments = func_call["arguments"] if func_call["arguments"] else "{}"
                new_tool_call = ChatCompletionMessageToolCallParam(
                    id=func_call["call_id"],
                    type="function",
                    function={
                        "name": func_call["name"],
                        "arguments": arguments,
                    },
                )
                tool_calls.append(new_tool_call)
                asst["tool_calls"] = tool_calls
            # 5) function call output => tool message
            elif func_output := cls.maybe_function_tool_call_output(item):
                flush_assistant_message()
                msg: ChatCompletionToolMessageParam = {
                    "role": "tool",
                    "tool_call_id": func_output["call_id"],
                    "content": func_output["output"],
                }
                result.append(msg)

            # 6) item reference => handle or raise
            elif item_ref := cls.maybe_item_reference(item):
                raise UserError(
                    f"Encountered an item_reference, which is not supported: {item_ref}"
                )

            # 7) If we haven't recognized it => fail or ignore
            else:
                raise UserError(f"Unhandled item type or structure: {item}")

        flush_assistant_message()
        return result

    @classmethod
    def tool_to_openai(cls, tool: Tool) -> ChatCompletionToolParam:
        if isinstance(tool, FunctionTool):
            return {
                "type": "function",
                "function": {
                    "name": tool.name,
                    "description": tool.description or "",
                    "parameters": tool.params_json_schema,
                },
            }

        raise UserError(
            f"Hosted tools are not supported with the ChatCompletions API. Got tool type: "
            f"{type(tool)}, tool: {tool}"
        )

    @classmethod
    def convert_handoff_tool(cls, handoff: Handoff[Any]) -> ChatCompletionToolParam:
        return {
            "type": "function",
            "function": {
                "name": handoff.tool_name,
                "description": handoff.tool_description,
                "parameters": handoff.input_json_schema,
            },
        }
```
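To make the conversion concrete, here is a minimal usage sketch of the new `Converter`: a Responses-style item list (a user message, a function tool call, and its output) is flattened into Chat Completions messages. The `items` payload and the names `get_weather` and `call_123` are illustrative, not taken from the package.

```python
# Hypothetical round-trip through the new converter; the input items below
# are made up for illustration.
from agents.models.chatcmpl_converter import Converter

items = [
    # Matches the EasyInputMessageParam shape (exactly the {"role", "content"} keys).
    {"role": "user", "content": "What's the weather in Tokyo?"},
    # A prior function tool call emitted by the model...
    {
        "type": "function_call",
        "call_id": "call_123",
        "name": "get_weather",
        "arguments": '{"city": "Tokyo"}',
    },
    # ...and its output, which becomes a role="tool" message.
    {"type": "function_call_output", "call_id": "call_123", "output": "Sunny, 22C"},
]

messages = Converter.items_to_messages(items)
# -> [user message, assistant message carrying the tool call, tool message]
```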
agents/models/chatcmpl_helpers.py (new file)
@@ -0,0 +1,37 @@

```python
from __future__ import annotations

from openai import AsyncOpenAI

from ..model_settings import ModelSettings
from ..version import __version__

_USER_AGENT = f"Agents/Python {__version__}"
HEADERS = {"User-Agent": _USER_AGENT}


class ChatCmplHelpers:
    @classmethod
    def is_openai(cls, client: AsyncOpenAI):
        return str(client.base_url).startswith("https://api.openai.com")

    @classmethod
    def get_store_param(cls, client: AsyncOpenAI, model_settings: ModelSettings) -> bool | None:
        # Match the behavior of Responses where store is True when not given
        default_store = True if cls.is_openai(client) else None
        return model_settings.store if model_settings.store is not None else default_store

    @classmethod
    def get_stream_options_param(
        cls, client: AsyncOpenAI, model_settings: ModelSettings, stream: bool
    ) -> dict[str, bool] | None:
        if not stream:
            return None

        default_include_usage = True if cls.is_openai(client) else None
        include_usage = (
            model_settings.include_usage
            if model_settings.include_usage is not None
            else default_include_usage
        )
        stream_options = {"include_usage": include_usage} if include_usage is not None else None
        return stream_options
```
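Likewise, a short sketch of how these helpers resolve defaults when the client points at the official OpenAI endpoint; the client construction and placeholder key are illustrative, and no request is made.

```python
# Illustrative default resolution for the new ChatCmplHelpers.
from openai import AsyncOpenAI

from agents.model_settings import ModelSettings
from agents.models.chatcmpl_helpers import ChatCmplHelpers

client = AsyncOpenAI(api_key="sk-...")  # default base_url is https://api.openai.com/v1
settings = ModelSettings()  # store and include_usage left unset (None)

assert ChatCmplHelpers.get_store_param(client, settings) is True
assert ChatCmplHelpers.get_stream_options_param(client, settings, stream=True) == {
    "include_usage": True
}
# For a non-OpenAI base_url (or stream=False), both helpers return None,
# so the corresponding request parameters are simply omitted.
```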