mirascope 2.0.1__py3-none-any.whl → 2.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/_stubs.py +39 -18
- mirascope/_utils.py +34 -0
- mirascope/api/_generated/__init__.py +4 -0
- mirascope/api/_generated/organization_invitations/client.py +2 -2
- mirascope/api/_generated/organization_invitations/raw_client.py +2 -2
- mirascope/api/_generated/project_memberships/__init__.py +4 -0
- mirascope/api/_generated/project_memberships/client.py +91 -0
- mirascope/api/_generated/project_memberships/raw_client.py +239 -0
- mirascope/api/_generated/project_memberships/types/__init__.py +4 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_get_response.py +33 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_get_response_role.py +7 -0
- mirascope/api/_generated/reference.md +73 -1
- mirascope/llm/__init__.py +19 -0
- mirascope/llm/calls/calls.py +28 -21
- mirascope/llm/calls/decorator.py +17 -24
- mirascope/llm/formatting/__init__.py +2 -2
- mirascope/llm/formatting/format.py +2 -4
- mirascope/llm/formatting/types.py +19 -2
- mirascope/llm/models/models.py +66 -146
- mirascope/llm/prompts/decorator.py +5 -16
- mirascope/llm/prompts/prompts.py +35 -38
- mirascope/llm/providers/anthropic/_utils/beta_decode.py +22 -7
- mirascope/llm/providers/anthropic/_utils/beta_encode.py +22 -16
- mirascope/llm/providers/anthropic/_utils/decode.py +45 -7
- mirascope/llm/providers/anthropic/_utils/encode.py +28 -15
- mirascope/llm/providers/anthropic/beta_provider.py +33 -69
- mirascope/llm/providers/anthropic/provider.py +52 -91
- mirascope/llm/providers/base/_utils.py +4 -9
- mirascope/llm/providers/base/base_provider.py +89 -205
- mirascope/llm/providers/google/_utils/decode.py +51 -1
- mirascope/llm/providers/google/_utils/encode.py +38 -21
- mirascope/llm/providers/google/provider.py +33 -69
- mirascope/llm/providers/mirascope/provider.py +25 -61
- mirascope/llm/providers/mlx/encoding/base.py +3 -6
- mirascope/llm/providers/mlx/encoding/transformers.py +4 -8
- mirascope/llm/providers/mlx/mlx.py +9 -21
- mirascope/llm/providers/mlx/provider.py +33 -69
- mirascope/llm/providers/openai/completions/_utils/encode.py +39 -20
- mirascope/llm/providers/openai/completions/base_provider.py +34 -75
- mirascope/llm/providers/openai/provider.py +25 -61
- mirascope/llm/providers/openai/responses/_utils/decode.py +31 -2
- mirascope/llm/providers/openai/responses/_utils/encode.py +32 -17
- mirascope/llm/providers/openai/responses/provider.py +34 -75
- mirascope/llm/responses/__init__.py +2 -1
- mirascope/llm/responses/base_stream_response.py +4 -0
- mirascope/llm/responses/response.py +8 -12
- mirascope/llm/responses/stream_response.py +8 -12
- mirascope/llm/responses/usage.py +44 -0
- mirascope/llm/tools/__init__.py +24 -0
- mirascope/llm/tools/provider_tools.py +18 -0
- mirascope/llm/tools/tool_schema.py +11 -4
- mirascope/llm/tools/toolkit.py +24 -6
- mirascope/llm/tools/types.py +112 -0
- mirascope/llm/tools/web_search_tool.py +32 -0
- mirascope/ops/__init__.py +19 -1
- mirascope/ops/_internal/closure.py +4 -1
- mirascope/ops/_internal/exporters/exporters.py +13 -46
- mirascope/ops/_internal/exporters/utils.py +37 -0
- mirascope/ops/_internal/instrumentation/__init__.py +20 -0
- mirascope/ops/_internal/instrumentation/llm/common.py +19 -49
- mirascope/ops/_internal/instrumentation/llm/model.py +61 -82
- mirascope/ops/_internal/instrumentation/llm/serialize.py +36 -12
- mirascope/ops/_internal/instrumentation/providers/__init__.py +29 -0
- mirascope/ops/_internal/instrumentation/providers/anthropic.py +78 -0
- mirascope/ops/_internal/instrumentation/providers/base.py +179 -0
- mirascope/ops/_internal/instrumentation/providers/google_genai.py +85 -0
- mirascope/ops/_internal/instrumentation/providers/openai.py +82 -0
- mirascope/ops/_internal/traced_calls.py +14 -0
- mirascope/ops/_internal/traced_functions.py +7 -2
- mirascope/ops/_internal/utils.py +12 -4
- mirascope/ops/_internal/versioned_functions.py +1 -1
- {mirascope-2.0.1.dist-info → mirascope-2.1.0.dist-info}/METADATA +96 -68
- {mirascope-2.0.1.dist-info → mirascope-2.1.0.dist-info}/RECORD +75 -64
- {mirascope-2.0.1.dist-info → mirascope-2.1.0.dist-info}/WHEEL +0 -0
- {mirascope-2.0.1.dist-info → mirascope-2.1.0.dist-info}/licenses/LICENSE +0 -0
--- a/mirascope/llm/providers/openai/completions/_utils/encode.py
+++ b/mirascope/llm/providers/openai/completions/_utils/encode.py
@@ -11,15 +11,22 @@ from openai.types import chat as openai_types, shared_params as shared_openai_ty
 from openai.types.shared_params.response_format_json_schema import JSONSchema
 
 from .....exceptions import FeatureNotSupportedError
-from .....formatting import Format,
+from .....formatting import Format, FormatSpec, FormattableT, resolve_format
 from .....messages import AssistantMessage, Message, UserMessage
-from .....tools import
+from .....tools import (
+    FORMAT_TOOL_NAME,
+    AnyToolSchema,
+    BaseToolkit,
+    ProviderTool,
+    WebSearchTool,
+)
 from ....base import _utils as _base_utils
 from ...model_id import OpenAIModelId, model_name
 from ...model_info import (
     MODELS_WITHOUT_AUDIO_SUPPORT,
     MODELS_WITHOUT_JSON_OBJECT_SUPPORT,
     MODELS_WITHOUT_JSON_SCHEMA_SUPPORT,
+    NON_REASONING_MODELS,
 )
 
 if TYPE_CHECKING:
@@ -41,6 +48,7 @@ class ChatCompletionCreateKwargs(TypedDict, total=False):
     parallel_tool_calls: bool | Omit
     temperature: float | Omit
     max_tokens: int | Omit
+    max_completion_tokens: int | Omit
     top_p: float | Omit
     seed: int | Omit
     stop: str | list[str] | Omit
@@ -228,9 +236,20 @@ def _encode_message(
 
 @lru_cache(maxsize=128)
 def _convert_tool_to_tool_param(
-    tool: AnyToolSchema,
+    tool: AnyToolSchema | ProviderTool,
 ) -> openai_types.ChatCompletionToolParam:
     """Convert a single Mirascope `Tool` to OpenAI ChatCompletionToolParam with caching."""
+    if isinstance(tool, WebSearchTool):
+        raise FeatureNotSupportedError(
+            "WebSearchTool",
+            provider_id="openai:completions",
+            message="Web search is only available in the OpenAI Responses API. "
+            "Use a model with :responses suffix (e.g., 'openai/gpt-4o:responses').",
+        )
+    if isinstance(tool, ProviderTool):
+        raise FeatureNotSupportedError(
+            f"Provider tool {tool.name}", provider_id="openai:completions"
+        )
     schema_dict = tool.parameters.model_dump(by_alias=True, exclude_none=True)
     schema_dict["type"] = "object"
     _base_utils.ensure_additional_properties_false(schema_dict)
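The new guard fails fast when a web-search tool reaches the Completions encoder, steering callers to the Responses API via the `:responses` model suffix. A minimal sketch of that failure path, assuming `call`, `WebSearchTool`, and `FeatureNotSupportedError` are re-exported from the public `mirascope.llm` namespace (the exact import paths and decorator signature are assumptions, not confirmed by this diff):

```python
# Hypothetical usage sketch; the public import paths are assumptions.
from mirascope import llm


@llm.call("openai/gpt-4o", tools=[llm.WebSearchTool()])
def answer(question: str) -> str:
    return f"Answer using web search: {question}"


try:
    answer("What changed in mirascope 2.1.0?")
except llm.FeatureNotSupportedError as err:
    # The Completions encoder rejects WebSearchTool and suggests the
    # Responses API instead, e.g. "openai/gpt-4o:responses".
    print(err)
```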
@@ -280,11 +299,8 @@ def encode_request(
     *,
     model_id: OpenAIModelId,
     messages: Sequence[Message],
-    tools:
-    format:
-    | Format[FormattableT]
-    | OutputParser[FormattableT]
-    | None,
+    tools: BaseToolkit[AnyToolSchema],
+    format: FormatSpec[FormattableT] | None,
     params: Params,
 ) -> tuple[Sequence[Message], Format[FormattableT] | None, ChatCompletionCreateKwargs]:
     """Prepares a request for the `OpenAI.chat.completions.create` method."""
@@ -297,37 +313,40 @@ def encode_request(
     )
     base_model_name = model_name(model_id, None)
 
+    is_reasoning_model = base_model_name not in NON_REASONING_MODELS
     kwargs: ChatCompletionCreateKwargs = ChatCompletionCreateKwargs(
-        {
-            "model": base_model_name,
-        }
+        {"model": base_model_name}
     )
     encode_thoughts_as_text = False
 
     with _base_utils.ensure_all_params_accessed(
         params=params,
         provider_id="openai",
-        unsupported_params=[
+        unsupported_params=[
+            "top_k",
+            *(["temperature", "top_p", "stop_sequences"] if is_reasoning_model else []),
+        ],
     ) as param_accessor:
-        if param_accessor.temperature is not None:
+        if not is_reasoning_model and param_accessor.temperature is not None:
             kwargs["temperature"] = param_accessor.temperature
         if param_accessor.max_tokens is not None:
-
-
+            if is_reasoning_model:
+                kwargs["max_completion_tokens"] = param_accessor.max_tokens
+            else:
+                kwargs["max_tokens"] = param_accessor.max_tokens
+        if not is_reasoning_model and param_accessor.top_p is not None:
             kwargs["top_p"] = param_accessor.top_p
 
         if param_accessor.seed is not None:
             kwargs["seed"] = param_accessor.seed
-        if param_accessor.stop_sequences is not None:
+        if not is_reasoning_model and param_accessor.stop_sequences is not None:
             kwargs["stop"] = param_accessor.stop_sequences
         if param_accessor.thinking is not None:
             thinking = param_accessor.thinking
             if thinking.get("encode_thoughts_as_text"):
                 encode_thoughts_as_text = True
 
-
-
-    openai_tools = [_convert_tool_to_tool_param(tool) for tool in tools]
+    openai_tools = [_convert_tool_to_tool_param(tool) for tool in tools.tools]
 
     model_supports_strict = base_model_name not in MODELS_WITHOUT_JSON_SCHEMA_SUPPORT
     default_mode = "strict" if model_supports_strict else "tool"
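The `is_reasoning_model` flag encodes a real constraint of OpenAI's reasoning models: they reject `temperature`, `top_p`, and `stop`, and take their token cap as `max_completion_tokens` rather than `max_tokens`. A sketch of the distinction against the raw OpenAI client (model names are illustrative, not taken from this diff):

```python
# Illustrative only; model names are examples.
from openai import OpenAI

client = OpenAI()

# Non-reasoning model: classic sampling parameters are accepted.
client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello"}],
    temperature=0.7,
    max_tokens=256,
)

# Reasoning model: temperature/top_p/stop are rejected by the API,
# and the token cap must be sent as max_completion_tokens.
client.chat.completions.create(
    model="o4-mini",
    messages=[{"role": "user", "content": "Hello"}],
    max_completion_tokens=256,
)
```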
@@ -342,7 +361,7 @@ def encode_request(
             )
             kwargs["response_format"] = _create_strict_response_format(format)
         elif format.mode == "tool":
-            if tools:
+            if tools.tools:
                 kwargs["tool_choice"] = "required"
             else:
                 kwargs["tool_choice"] = {
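For formatting, the encoder prefers `strict` mode (OpenAI structured outputs) and falls back to `tool` mode on models without JSON-schema support, forcing a tool call: `tool_choice="required"` when user tools are present, otherwise pinning a specific tool (the hunk cuts off, but the imported `FORMAT_TOOL_NAME` suggests the synthetic format tool). For reference, a strict `response_format` payload on the wire looks roughly like this (the `Book` schema is invented for illustration):

```python
# Shape of an OpenAI structured-outputs response_format; the schema is made up.
response_format = {
    "type": "json_schema",
    "json_schema": {
        "name": "Book",
        "strict": True,
        "schema": {
            "type": "object",
            "properties": {
                "title": {"type": "string"},
                "author": {"type": "string"},
            },
            "required": ["title", "author"],
            "additionalProperties": False,
        },
    },
}
```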
--- a/mirascope/llm/providers/openai/completions/base_provider.py
+++ b/mirascope/llm/providers/openai/completions/base_provider.py
@@ -10,7 +10,7 @@ from typing_extensions import Unpack
 from openai import AsyncOpenAI, OpenAI
 
 from ....context import Context, DepsT
-from ....formatting import
+from ....formatting import FormatSpec, FormattableT
 from ....messages import Message
 from ....responses import (
     AsyncContextResponse,
@@ -22,16 +22,7 @@ from ....responses import (
     Response,
     StreamResponse,
 )
-from ....tools import (
-    AsyncContextTool,
-    AsyncContextToolkit,
-    AsyncTool,
-    AsyncToolkit,
-    ContextTool,
-    ContextToolkit,
-    Tool,
-    Toolkit,
-)
+from ....tools import AsyncContextToolkit, AsyncToolkit, ContextToolkit, Toolkit
 from ...base import BaseProvider
 from .. import _utils as _shared_utils
 from ..model_id import model_name as openai_model_name
@@ -101,11 +92,8 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         *,
         model_id: str,
         messages: Sequence[Message],
-
-        format:
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: Toolkit,
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> Response | Response[FormattableT]:
         """Generate an `llm.Response` by synchronously calling the API.
@@ -123,7 +111,7 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         input_messages, format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -143,7 +131,7 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
             model_id=model_id,
             provider_model_name=self._provider_model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             assistant_message=assistant_message,
             finish_reason=finish_reason,
@@ -157,13 +145,8 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         ctx: Context[DepsT],
         model_id: str,
         messages: Sequence[Message],
-
-
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: ContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextResponse` by synchronously calling the API.
@@ -182,7 +165,7 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         input_messages, format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -202,7 +185,7 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
             model_id=model_id,
             provider_model_name=self._provider_model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             assistant_message=assistant_message,
             finish_reason=finish_reason,
@@ -215,11 +198,8 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         *,
         model_id: str,
         messages: Sequence[Message],
-
-        format:
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: AsyncToolkit,
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> AsyncResponse | AsyncResponse[FormattableT]:
         """Generate an `llm.AsyncResponse` by asynchronously calling the API.
@@ -238,7 +218,7 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
             model_id=model_id,
             params=params,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
         )
         kwargs["model"] = self._model_name(model_id)
@@ -257,7 +237,7 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
             model_id=model_id,
             provider_model_name=self._provider_model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             assistant_message=assistant_message,
             finish_reason=finish_reason,
@@ -271,13 +251,8 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         ctx: Context[DepsT],
         model_id: str,
         messages: Sequence[Message],
-
-
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: AsyncContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
         """Generate an `llm.AsyncContextResponse` by asynchronously calling the API.
@@ -297,7 +272,7 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
             model_id=model_id,
             params=params,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
         )
         kwargs["model"] = self._model_name(model_id)
@@ -316,7 +291,7 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
             model_id=model_id,
             provider_model_name=self._provider_model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             assistant_message=assistant_message,
             finish_reason=finish_reason,
@@ -329,11 +304,8 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         *,
         model_id: str,
         messages: Sequence[Message],
-
-        format:
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: Toolkit,
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> StreamResponse | StreamResponse[FormattableT]:
         """Generate an `llm.StreamResponse` by synchronously streaming from the API.
@@ -351,7 +323,7 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         input_messages, format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -369,7 +341,7 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
             model_id=model_id,
             provider_model_name=self._provider_model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             chunk_iterator=chunk_iterator,
             format=format,
@@ -381,13 +353,8 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         ctx: Context[DepsT],
         model_id: str,
         messages: Sequence[Message],
-
-
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: ContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextStreamResponse` by synchronously streaming from the API.
@@ -406,7 +373,7 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         input_messages, format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -425,7 +392,7 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
             model_id=model_id,
             provider_model_name=self._provider_model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             chunk_iterator=chunk_iterator,
             format=format,
@@ -436,11 +403,8 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         *,
         model_id: str,
         messages: Sequence[Message],
-
-        format:
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: AsyncToolkit,
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
         """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the API.
@@ -458,7 +422,7 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         input_messages, format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -476,7 +440,7 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
             model_id=model_id,
             provider_model_name=self._provider_model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             chunk_iterator=chunk_iterator,
             format=format,
@@ -488,13 +452,8 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         ctx: Context[DepsT],
         model_id: str,
         messages: Sequence[Message],
-
-
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: AsyncContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> (
         AsyncContextStreamResponse[DepsT]
@@ -516,7 +475,7 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         input_messages, format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -535,7 +494,7 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
             model_id=model_id,
             provider_model_name=self._provider_model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             chunk_iterator=chunk_iterator,
             format=format,
--- a/mirascope/llm/providers/openai/provider.py
+++ b/mirascope/llm/providers/openai/provider.py
@@ -10,7 +10,7 @@ from openai import BadRequestError as OpenAIBadRequestError, OpenAI
 
 from ...context import Context, DepsT
 from ...exceptions import BadRequestError, NotFoundError
-from ...formatting import
+from ...formatting import FormatSpec, FormattableT
 from ...messages import Message
 from ...responses import (
     AsyncContextResponse,
@@ -23,13 +23,9 @@ from ...responses import (
     StreamResponse,
 )
 from ...tools import (
-    AsyncContextTool,
     AsyncContextToolkit,
-    AsyncTool,
     AsyncToolkit,
-    ContextTool,
     ContextToolkit,
-    Tool,
     Toolkit,
 )
 from ..base import BaseProvider
@@ -162,11 +158,8 @@ class OpenAIProvider(BaseProvider[OpenAI]):
         *,
         model_id: OpenAIModelId,
         messages: Sequence[Message],
-
-        format:
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: Toolkit,
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> Response | Response[FormattableT]:
         """Generate an `llm.Response` by synchronously calling the OpenAI API.
@@ -185,7 +178,7 @@ class OpenAIProvider(BaseProvider[OpenAI]):
         return client.call(
             model_id=model_id,
             messages=messages,
-
+            toolkit=toolkit,
             format=format,
             **params,
         )
@@ -196,13 +189,8 @@ class OpenAIProvider(BaseProvider[OpenAI]):
         ctx: Context[DepsT],
         model_id: OpenAIModelId,
         messages: Sequence[Message],
-
-
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: ContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextResponse` by synchronously calling the OpenAI API.
@@ -223,7 +211,7 @@ class OpenAIProvider(BaseProvider[OpenAI]):
             ctx=ctx,
             model_id=model_id,
             messages=messages,
-
+            toolkit=toolkit,
             format=format,
             **params,
         )
@@ -233,11 +221,8 @@ class OpenAIProvider(BaseProvider[OpenAI]):
         *,
         model_id: OpenAIModelId,
         messages: Sequence[Message],
-
-        format:
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: AsyncToolkit,
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> AsyncResponse | AsyncResponse[FormattableT]:
         """Generate an `llm.AsyncResponse` by asynchronously calling the OpenAI API.
@@ -255,7 +240,7 @@ class OpenAIProvider(BaseProvider[OpenAI]):
         return await self._choose_subprovider(model_id, messages).call_async(
             model_id=model_id,
             messages=messages,
-
+            toolkit=toolkit,
             format=format,
             **params,
         )
@@ -266,13 +251,8 @@ class OpenAIProvider(BaseProvider[OpenAI]):
         ctx: Context[DepsT],
         model_id: OpenAIModelId,
         messages: Sequence[Message],
-
-
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: AsyncContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
         """Generate an `llm.AsyncContextResponse` by asynchronously calling the OpenAI API.
@@ -292,7 +272,7 @@ class OpenAIProvider(BaseProvider[OpenAI]):
             ctx=ctx,
             model_id=model_id,
             messages=messages,
-
+            toolkit=toolkit,
             format=format,
             **params,
         )
@@ -302,11 +282,8 @@ class OpenAIProvider(BaseProvider[OpenAI]):
         *,
         model_id: OpenAIModelId,
         messages: Sequence[Message],
-
-        format:
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: Toolkit,
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> StreamResponse | StreamResponse[FormattableT]:
         """Generate an `llm.StreamResponse` by synchronously streaming from the OpenAI API.
@@ -325,7 +302,7 @@ class OpenAIProvider(BaseProvider[OpenAI]):
         return client.stream(
             model_id=model_id,
             messages=messages,
-
+            toolkit=toolkit,
             format=format,
             **params,
         )
@@ -336,13 +313,8 @@ class OpenAIProvider(BaseProvider[OpenAI]):
         ctx: Context[DepsT],
         model_id: OpenAIModelId,
         messages: Sequence[Message],
-
-
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: ContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextStreamResponse` by synchronously streaming from the OpenAI API.
@@ -363,7 +335,7 @@ class OpenAIProvider(BaseProvider[OpenAI]):
             ctx=ctx,
             model_id=model_id,
             messages=messages,
-
+            toolkit=toolkit,
             format=format,
             **params,
         )
@@ -373,11 +345,8 @@ class OpenAIProvider(BaseProvider[OpenAI]):
         *,
         model_id: OpenAIModelId,
         messages: Sequence[Message],
-
-        format:
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: AsyncToolkit,
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
         """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the OpenAI API.
@@ -395,7 +364,7 @@ class OpenAIProvider(BaseProvider[OpenAI]):
         return await self._choose_subprovider(model_id, messages).stream_async(
             model_id=model_id,
             messages=messages,
-
+            toolkit=toolkit,
             format=format,
             **params,
         )
@@ -406,13 +375,8 @@ class OpenAIProvider(BaseProvider[OpenAI]):
         ctx: Context[DepsT],
         model_id: OpenAIModelId,
         messages: Sequence[Message],
-
-
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: AsyncContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> (
         AsyncContextStreamResponse[DepsT]
@@ -435,7 +399,7 @@ class OpenAIProvider(BaseProvider[OpenAI]):
             ctx=ctx,
             model_id=model_id,
             messages=messages,
-
+            toolkit=toolkit,
             format=format,
             **params,
         )