unique_toolkit 1.15.0__py3-none-any.whl → 1.16.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- unique_toolkit/agentic/postprocessor/postprocessor_manager.py +50 -11
- unique_toolkit/agentic/responses_api/__init__.py +19 -0
- unique_toolkit/agentic/responses_api/postprocessors/code_display.py +63 -0
- unique_toolkit/agentic/responses_api/postprocessors/generated_files.py +145 -0
- unique_toolkit/agentic/responses_api/stream_handler.py +15 -0
- unique_toolkit/agentic/tools/factory.py +4 -0
- unique_toolkit/agentic/tools/openai_builtin/__init__.py +11 -0
- unique_toolkit/agentic/tools/openai_builtin/base.py +30 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py +8 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py +57 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py +230 -0
- unique_toolkit/agentic/tools/openai_builtin/manager.py +62 -0
- unique_toolkit/agentic/tools/tool_manager.py +257 -127
- unique_toolkit/chat/functions.py +15 -6
- unique_toolkit/chat/responses_api.py +461 -0
- unique_toolkit/language_model/functions.py +25 -9
- unique_toolkit/language_model/schemas.py +222 -27
- unique_toolkit/protocols/support.py +91 -9
- unique_toolkit/services/__init__.py +7 -0
- unique_toolkit/services/chat_service.py +139 -7
- {unique_toolkit-1.15.0.dist-info → unique_toolkit-1.16.0.dist-info}/METADATA +5 -1
- {unique_toolkit-1.15.0.dist-info → unique_toolkit-1.16.0.dist-info}/RECORD +24 -12
- {unique_toolkit-1.15.0.dist-info → unique_toolkit-1.16.0.dist-info}/LICENSE +0 -0
- {unique_toolkit-1.15.0.dist-info → unique_toolkit-1.16.0.dist-info}/WHEEL +0 -0
@@ -0,0 +1,461 @@
|
|
1
|
+
import logging
|
2
|
+
from typing import Any, NamedTuple, Sequence
|
3
|
+
|
4
|
+
import unique_sdk
|
5
|
+
from openai.types.responses import (
|
6
|
+
ResponseIncludable,
|
7
|
+
ResponseInputItemParam,
|
8
|
+
ResponseInputParam,
|
9
|
+
ResponseOutputItem,
|
10
|
+
ResponseTextConfigParam,
|
11
|
+
ToolParam,
|
12
|
+
response_create_params,
|
13
|
+
)
|
14
|
+
from openai.types.shared_params import Metadata, Reasoning
|
15
|
+
from pydantic import BaseModel, TypeAdapter, ValidationError
|
16
|
+
|
17
|
+
from unique_toolkit.agentic.tools.utils.execution.execution import (
|
18
|
+
failsafe,
|
19
|
+
)
|
20
|
+
from unique_toolkit.content.schemas import ContentChunk
|
21
|
+
from unique_toolkit.language_model.constants import (
|
22
|
+
DEFAULT_COMPLETE_TEMPERATURE,
|
23
|
+
)
|
24
|
+
from unique_toolkit.language_model.functions import (
|
25
|
+
SearchContext,
|
26
|
+
_clamp_temperature,
|
27
|
+
_to_search_context,
|
28
|
+
)
|
29
|
+
from unique_toolkit.language_model.infos import (
|
30
|
+
LanguageModelInfo,
|
31
|
+
LanguageModelName,
|
32
|
+
)
|
33
|
+
from unique_toolkit.language_model.schemas import (
|
34
|
+
LanguageModelAssistantMessage,
|
35
|
+
LanguageModelMessage,
|
36
|
+
LanguageModelMessageOptions,
|
37
|
+
LanguageModelMessageRole,
|
38
|
+
LanguageModelMessages,
|
39
|
+
LanguageModelSystemMessage,
|
40
|
+
LanguageModelToolDescription,
|
41
|
+
LanguageModelToolMessage,
|
42
|
+
LanguageModelUserMessage,
|
43
|
+
ResponsesLanguageModelStreamResponse,
|
44
|
+
)
|
45
|
+
|
46
|
+
# Module-level logger, namespaced to this module per stdlib convention.
logger = logging.getLogger(__name__)
|
47
|
+
|
48
|
+
|
49
|
+
def _convert_tools_to_openai(
    tools: Sequence[LanguageModelToolDescription | ToolParam],
) -> list[ToolParam]:
    """Normalize a mixed tool sequence into OpenAI Responses-API tool params.

    Toolkit-native ``LanguageModelToolDescription`` entries are converted via
    their ``to_openai(mode="responses")`` hook; entries that are already
    ``ToolParam`` dicts pass through untouched.
    """
    return [
        tool.to_openai(mode="responses")
        if isinstance(tool, LanguageModelToolDescription)
        else tool
        for tool in tools
    ]
|
59
|
+
|
60
|
+
|
61
|
+
def _convert_message_to_openai(
|
62
|
+
message: LanguageModelMessageOptions,
|
63
|
+
) -> ResponseInputParam:
|
64
|
+
res = []
|
65
|
+
match message:
|
66
|
+
case LanguageModelAssistantMessage():
|
67
|
+
return message.to_openai(mode="responses") # type: ignore
|
68
|
+
case (
|
69
|
+
LanguageModelUserMessage()
|
70
|
+
| LanguageModelSystemMessage()
|
71
|
+
| LanguageModelToolMessage()
|
72
|
+
):
|
73
|
+
return [message.to_openai(mode="responses")]
|
74
|
+
case _:
|
75
|
+
return _convert_message_to_openai(_convert_to_specific_message(message))
|
76
|
+
return res
|
77
|
+
|
78
|
+
|
79
|
+
def _convert_to_specific_message(
    message: LanguageModelMessage,
) -> "LanguageModelSystemMessage | LanguageModelUserMessage | LanguageModelAssistantMessage":
    """Narrow a generic message to its role-specific subclass.

    Raises
    ------
    ValueError
        If the message's role is ``tool`` — tool messages carry extra fields
        and must be created as ``LanguageModelToolMessage`` directly.
    """
    if message.role == LanguageModelMessageRole.TOOL:
        raise ValueError(
            "Cannot convert message with role `tool`. Please use `LanguageModelToolMessage` instead."
        )

    constructor_by_role = {
        LanguageModelMessageRole.SYSTEM: LanguageModelSystemMessage,
        LanguageModelMessageRole.USER: LanguageModelUserMessage,
        LanguageModelMessageRole.ASSISTANT: LanguageModelAssistantMessage,
    }
    return constructor_by_role[message.role](content=message.content)
|
93
|
+
|
94
|
+
|
95
|
+
def _convert_messages_to_openai(
    messages: Sequence[
        ResponseInputItemParam | LanguageModelMessageOptions | ResponseOutputItem
    ],
) -> ResponseInputParam:
    """Flatten a heterogeneous message sequence into Responses-API input items.

    Toolkit messages are expanded through ``_convert_message_to_openai``;
    plain dicts (OpenAI's input shape) pass through as-is; remaining items
    are OpenAI output models (pydantic ``BaseModel``) and are dumped back to
    dict form with defaults stripped.
    """
    converted: ResponseInputParam = []
    for item in messages:
        if isinstance(item, LanguageModelMessageOptions):
            converted.extend(_convert_message_to_openai(item))
            continue
        # OpenAI uses dicts for its input and BaseModel for its output.
        if isinstance(item, dict):
            converted.append(item)
            continue
        assert isinstance(item, BaseModel)
        converted.append(item.model_dump(exclude_defaults=True))
    return converted
|
113
|
+
|
114
|
+
|
115
|
+
class _ResponsesParams(NamedTuple):
    """Normalized parameter bundle produced by ``_prepare_responses_params_util``."""

    # Sampling temperature (possibly clamped to the model's allowed bounds).
    temperature: float
    # Backend model identifier as a plain string (enum already resolved).
    model_name: str
    # Search results derived from content chunks, or None when no chunks given.
    search_context: SearchContext | None
    # Either a raw prompt string or fully converted Responses-API input items.
    messages: str | ResponseInputParam
    # Tools in OpenAI ToolParam form, or None when no tools were supplied.
    tools: list[ToolParam] | None
    # Reasoning configuration (explicit, extracted from options, or model default).
    reasoning: Reasoning | None
    # Text/verbosity configuration for the Responses API, if any.
    text: ResponseTextConfigParam | None
|
123
|
+
|
124
|
+
|
125
|
+
def _prepare_responses_params_util(
    model_name: LanguageModelName | str,
    content_chunks: list[ContentChunk] | None,
    temperature: float,
    tools: Sequence[LanguageModelToolDescription | ToolParam] | None,
    messages: str
    | LanguageModelMessages
    | Sequence[
        ResponseInputItemParam | LanguageModelMessageOptions | ResponseOutputItem
    ],
    reasoning: Reasoning | None,
    text: ResponseTextConfigParam | None,
    other_options: dict | None = None,
) -> _ResponsesParams:
    """Normalize all user-facing inputs into a ``_ResponsesParams`` bundle.

    Resolves the model name, converts chunks/tools/messages to wire format,
    reconciles reasoning/text settings from explicit arguments,
    ``other_options`` and model defaults (in that precedence order), and
    clamps the temperature to the model's bounds when known.
    """
    search_context = (
        _to_search_context(content_chunks) if content_chunks is not None else None
    )

    model = model_name.name if isinstance(model_name, LanguageModelName) else model_name

    tools_res = _convert_tools_to_openai(tools) if tools is not None else None

    if other_options is not None:
        # Keyword arguments take precedence over values in other_options.
        reasoning = reasoning or _attempt_extract_reasoning_from_options(other_options)
        text = text or _attempt_extract_verbosity_from_options(other_options)

    if isinstance(model_name, LanguageModelName):
        model_info = LanguageModelInfo.from_name(model_name)

        if model_info.temperature_bounds is not None and temperature is not None:
            temperature = _clamp_temperature(temperature, model_info.temperature_bounds)

        if (
            reasoning is None
            and model_info.default_options is not None
            and "reasoning_effort" in model_info.default_options
        ):
            reasoning = Reasoning(effort=model_info.default_options["reasoning_effort"])

    if (
        reasoning is not None
        and tools_res is not None
        and any(tool.get("type") == "code_interpreter" for tool in tools_res)
        and reasoning.get("effort") == "minimal"
    ):
        logger.warning(
            "Code interpreter cannot be used with `minimal` effort. Switching to `low`."
        )
        # Fix: copy instead of assigning into the dict so a caller-supplied
        # `reasoning` mapping is never mutated in place.
        reasoning = {**reasoning, "effort": "low"}  # type: ignore[assignment]

    # Fix: accept any Sequence of message items (the annotation allows e.g.
    # tuples); previously only `list` was handled and anything else tripped
    # the `isinstance(messages, str)` assertion.
    messages_res: str | ResponseInputParam
    if isinstance(messages, str):
        messages_res = messages
    elif isinstance(messages, LanguageModelMessages):
        messages_res = _convert_messages_to_openai(messages.root)
    else:
        messages_res = _convert_messages_to_openai(messages)

    return _ResponsesParams(
        temperature, model, search_context, messages_res, tools_res, reasoning, text
    )
|
191
|
+
|
192
|
+
|
193
|
+
@failsafe(
    failure_return_value=None,
    exceptions=(ValidationError,),
    log_exc_info=False,
    logger=logger,
)
def _attempt_extract_reasoning_from_options(options: dict) -> Reasoning | None:
    """Pull a ``Reasoning`` config out of an opaque options dict, if present.

    Accepts the Responses-API shape (``reasoning``) as well as the
    Completions-API effort keys (``reasoning_effort`` / ``reasoningEffort``).
    Validation failures are swallowed by ``@failsafe`` and yield ``None``.
    """
    raw: dict | None = None

    # Responses API
    if "reasoning" in options:
        raw = options["reasoning"]
    # Completions API (snake_case)
    elif "reasoning_effort" in options:
        raw = {"effort": options["reasoning_effort"]}

    # NOTE(review): deliberately a plain `if` in the original — a camelCase
    # `reasoningEffort` key overrides both shapes above; confirm this
    # precedence is intended.
    if "reasoningEffort" in options:
        raw = {"effort": options["reasoningEffort"]}

    if raw is None:
        return None
    return TypeAdapter(Reasoning).validate_python(raw)
|
216
|
+
|
217
|
+
|
218
|
+
@failsafe(
    failure_return_value=None,
    exceptions=(ValidationError,),
    log_exc_info=False,
    logger=logger,
)
def _attempt_extract_verbosity_from_options(
    options: dict,
) -> ResponseTextConfigParam | None:
    """Pull a text/verbosity config out of an opaque options dict, if present.

    Accepts the Responses-API shape (``text``) or the Completions-API
    ``verbosity`` key. Validation failures are swallowed by ``@failsafe``
    and yield ``None``.
    """
    if "text" in options:
        # Responses API shape, taken as-is.
        text_config = options["text"]
    elif "verbosity" in options:
        # Completions API: wrap the bare verbosity value.
        text_config = {"verbosity": options["verbosity"]}
    else:
        return None

    return TypeAdapter(ResponseTextConfigParam).validate_python(text_config)
|
241
|
+
|
242
|
+
|
243
|
+
def _prepare_responses_args(
    company_id: str,
    user_id: str,
    assistant_message_id: str,
    user_message_id: str,
    chat_id: str,
    assistant_id: str,
    params: _ResponsesParams,
    debug_info: dict | None,
    start_text: str | None,
    include: list[ResponseIncludable] | None,
    instructions: str | None,
    max_output_tokens: int | None,
    metadata: Metadata | None,
    parallel_tool_calls: bool | None,
    tool_choice: response_create_params.ToolChoice | None,
    top_p: float | None,
    other_options: dict | None = None,
) -> dict[str, Any]:
    """Assemble keyword arguments for ``unique_sdk.Integrated.responses_stream``.

    Backend-level fields (ids, search context, input) go on the top-level
    payload; OpenAI passthrough settings are collected under ``options``,
    each one forwarded only when explicitly provided.
    """
    payload: dict[str, Any] = {}

    payload["company_id"] = company_id
    payload["user_id"] = user_id

    payload["model"] = params.model_name

    if params.search_context is not None:
        payload["searchContext"] = params.search_context

    payload["chatId"] = chat_id
    payload["assistantId"] = assistant_id
    payload["assistantMessageId"] = assistant_message_id
    payload["userMessageId"] = user_message_id

    if debug_info is not None:
        payload["debugInfo"] = debug_info
    if start_text is not None:
        payload["startText"] = start_text

    payload["input"] = params.messages

    openai_options: unique_sdk.Integrated.CreateStreamResponsesOpenaiParams = {}

    # Forward only the settings that were explicitly provided (non-None).
    for key, value in (
        ("temperature", params.temperature),
        ("reasoning", params.reasoning),
        ("text", params.text),
        ("include", include),
        ("instructions", instructions),
        ("max_output_tokens", max_output_tokens),
        ("metadata", metadata),
        ("parallel_tool_calls", parallel_tool_calls),
        ("tool_choice", tool_choice),
        ("tools", params.tools),
        ("top_p", top_p),
    ):
        if value is not None:
            openai_options[key] = value  # type: ignore[literal-required]

    # Allow any other openai.resources.responses.Response.create options.
    if other_options is not None:
        openai_options.update(other_options)  # type: ignore

    payload["options"] = openai_options

    return payload
|
326
|
+
|
327
|
+
|
328
|
+
def stream_responses_with_references(
    *,
    company_id: str,
    user_id: str,
    assistant_message_id: str,
    user_message_id: str,
    chat_id: str,
    assistant_id: str,
    model_name: LanguageModelName | str,
    messages: str
    | LanguageModelMessages
    | Sequence[
        ResponseInputItemParam | LanguageModelMessageOptions | ResponseOutputItem
    ],
    content_chunks: list[ContentChunk] | None = None,
    tools: Sequence[LanguageModelToolDescription | ToolParam] | None = None,
    temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
    debug_info: dict | None = None,
    start_text: str | None = None,
    include: list[ResponseIncludable] | None = None,
    instructions: str | None = None,
    max_output_tokens: int | None = None,
    metadata: Metadata | None = None,
    parallel_tool_calls: bool | None = None,
    text: ResponseTextConfigParam | None = None,
    tool_choice: response_create_params.ToolChoice | None = None,
    top_p: float | None = None,
    reasoning: Reasoning | None = None,
    other_options: dict | None = None,
) -> ResponsesLanguageModelStreamResponse:
    """Stream a Responses-API completion through the Unique backend (blocking).

    Normalizes toolkit messages/tools into OpenAI wire format, builds the SDK
    payload, invokes ``unique_sdk.Integrated.responses_stream`` and validates
    the result into a ``ResponsesLanguageModelStreamResponse``.
    """
    prepared = _prepare_responses_params_util(
        model_name=model_name,
        content_chunks=content_chunks,
        temperature=temperature,
        tools=tools,
        messages=messages,
        reasoning=reasoning,
        text=text,
        other_options=other_options,
    )

    sdk_kwargs = _prepare_responses_args(
        company_id=company_id,
        user_id=user_id,
        assistant_message_id=assistant_message_id,
        user_message_id=user_message_id,
        chat_id=chat_id,
        assistant_id=assistant_id,
        params=prepared,
        debug_info=debug_info,
        start_text=start_text,
        include=include,
        instructions=instructions,
        max_output_tokens=max_output_tokens,
        metadata=metadata,
        parallel_tool_calls=parallel_tool_calls,
        tool_choice=tool_choice,
        top_p=top_p,
        other_options=other_options,
    )

    raw_response = unique_sdk.Integrated.responses_stream(**sdk_kwargs)
    return ResponsesLanguageModelStreamResponse.model_validate(raw_response)
|
394
|
+
|
395
|
+
|
396
|
+
async def stream_responses_with_references_async(
    *,
    company_id: str,
    user_id: str,
    assistant_message_id: str,
    user_message_id: str,
    chat_id: str,
    assistant_id: str,
    model_name: LanguageModelName | str,
    messages: str
    | LanguageModelMessages
    | Sequence[
        ResponseInputItemParam | LanguageModelMessageOptions | ResponseOutputItem
    ],
    content_chunks: list[ContentChunk] | None = None,
    tools: Sequence[LanguageModelToolDescription | ToolParam] | None = None,
    temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
    debug_info: dict | None = None,
    start_text: str | None = None,
    include: list[ResponseIncludable] | None = None,
    instructions: str | None = None,
    max_output_tokens: int | None = None,
    metadata: Metadata | None = None,
    parallel_tool_calls: bool | None = None,
    text: ResponseTextConfigParam | None = None,
    tool_choice: response_create_params.ToolChoice | None = None,
    top_p: float | None = None,
    reasoning: Reasoning | None = None,
    other_options: dict | None = None,
) -> ResponsesLanguageModelStreamResponse:
    """Async variant of ``stream_responses_with_references``.

    Identical preparation pipeline; awaits
    ``unique_sdk.Integrated.responses_stream_async`` instead of blocking.
    """
    prepared = _prepare_responses_params_util(
        model_name=model_name,
        content_chunks=content_chunks,
        temperature=temperature,
        tools=tools,
        messages=messages,
        reasoning=reasoning,
        text=text,
        other_options=other_options,
    )

    sdk_kwargs = _prepare_responses_args(
        company_id=company_id,
        user_id=user_id,
        assistant_message_id=assistant_message_id,
        user_message_id=user_message_id,
        chat_id=chat_id,
        assistant_id=assistant_id,
        params=prepared,
        debug_info=debug_info,
        start_text=start_text,
        include=include,
        instructions=instructions,
        max_output_tokens=max_output_tokens,
        metadata=metadata,
        parallel_tool_calls=parallel_tool_calls,
        tool_choice=tool_choice,
        top_p=top_p,
        other_options=other_options,
    )

    raw_response = await unique_sdk.Integrated.responses_stream_async(**sdk_kwargs)
    return ResponsesLanguageModelStreamResponse.model_validate(raw_response)
|
@@ -1,10 +1,11 @@
|
|
1
1
|
import copy
|
2
2
|
import logging
|
3
3
|
from datetime import UTC, datetime
|
4
|
-
from typing import Any, cast
|
4
|
+
from typing import Any, Sequence, cast
|
5
5
|
|
6
6
|
import humps
|
7
7
|
import unique_sdk
|
8
|
+
from openai.types.chat import ChatCompletionToolChoiceOptionParam
|
8
9
|
from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
|
9
10
|
from pydantic import BaseModel
|
10
11
|
|
@@ -159,7 +160,7 @@ async def complete_async(
|
|
159
160
|
|
160
161
|
def _add_tools_to_options(
|
161
162
|
options: dict,
|
162
|
-
tools:
|
163
|
+
tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None,
|
163
164
|
) -> dict:
|
164
165
|
if tools:
|
165
166
|
options["tools"] = [
|
@@ -172,7 +173,12 @@ def _add_tools_to_options(
|
|
172
173
|
return options
|
173
174
|
|
174
175
|
|
175
|
-
|
176
|
+
SearchContext = list[unique_sdk.Integrated.SearchResult]
|
177
|
+
|
178
|
+
|
179
|
+
def _to_search_context(
|
180
|
+
chunks: list[ContentChunk],
|
181
|
+
) -> SearchContext | None:
|
176
182
|
if not chunks:
|
177
183
|
return None
|
178
184
|
return [
|
@@ -211,12 +217,12 @@ def _prepare_completion_params_util(
|
|
211
217
|
messages: LanguageModelMessages,
|
212
218
|
model_name: LanguageModelName | str,
|
213
219
|
temperature: float,
|
214
|
-
tools:
|
220
|
+
tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
|
215
221
|
other_options: dict | None = None,
|
216
222
|
content_chunks: list[ContentChunk] | None = None,
|
217
223
|
structured_output_model: type[BaseModel] | None = None,
|
218
224
|
structured_output_enforce_schema: bool = False,
|
219
|
-
) -> tuple[dict, str, dict,
|
225
|
+
) -> tuple[dict, str, dict, SearchContext | None]:
|
220
226
|
"""Prepare common parameters for completion requests.
|
221
227
|
|
222
228
|
Returns
|
@@ -259,12 +265,12 @@ def _prepare_completion_params_util(
|
|
259
265
|
def _prepare_openai_completion_params_util(
|
260
266
|
model_name: LanguageModelName | str,
|
261
267
|
temperature: float,
|
262
|
-
tools:
|
268
|
+
tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
|
263
269
|
other_options: dict | None = None,
|
264
270
|
content_chunks: list[ContentChunk] | None = None,
|
265
271
|
structured_output_model: type[BaseModel] | None = None,
|
266
272
|
structured_output_enforce_schema: bool = False,
|
267
|
-
) -> tuple[dict, str,
|
273
|
+
) -> tuple[dict, str, SearchContext | None]:
|
268
274
|
"""Prepare common parameters for completion requests.
|
269
275
|
|
270
276
|
Returns
|
@@ -330,18 +336,28 @@ def _prepare_all_completions_params_util(
|
|
330
336
|
messages: LanguageModelMessages | list[ChatCompletionMessageParam],
|
331
337
|
model_name: LanguageModelName | str,
|
332
338
|
temperature: float,
|
333
|
-
tools:
|
339
|
+
tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
|
334
340
|
other_options: dict | None = None,
|
335
341
|
content_chunks: list[ContentChunk] | None = None,
|
342
|
+
tool_choice: ChatCompletionToolChoiceOptionParam | None = None,
|
336
343
|
structured_output_model: type[BaseModel] | None = None,
|
337
344
|
structured_output_enforce_schema: bool = False,
|
338
345
|
) -> tuple[
|
339
346
|
dict,
|
340
347
|
str,
|
341
348
|
list[unique_sdk.Integrated.ChatCompletionRequestMessage],
|
342
|
-
|
349
|
+
SearchContext | None,
|
343
350
|
]:
|
344
351
|
model_info = None
|
352
|
+
|
353
|
+
other_options = copy.deepcopy(other_options)
|
354
|
+
|
355
|
+
if tool_choice is not None:
|
356
|
+
if other_options is None:
|
357
|
+
other_options = {}
|
358
|
+
if "toolChoice" not in other_options:
|
359
|
+
other_options["toolChoice"] = tool_choice # Backend expects CamelCase
|
360
|
+
|
345
361
|
if isinstance(model_name, LanguageModelName):
|
346
362
|
model_info = LanguageModelInfo.from_name(model_name)
|
347
363
|
other_options = _prepare_other_options(
|