mirascope 2.0.2__py3-none-any.whl → 2.1.0__py3-none-any.whl
This diff compares two publicly released versions of the package as published to their public registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in that registry.
- mirascope/_stubs.py +39 -18
- mirascope/api/_generated/__init__.py +4 -0
- mirascope/api/_generated/project_memberships/__init__.py +4 -0
- mirascope/api/_generated/project_memberships/client.py +91 -0
- mirascope/api/_generated/project_memberships/raw_client.py +239 -0
- mirascope/api/_generated/project_memberships/types/__init__.py +4 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_get_response.py +33 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_get_response_role.py +7 -0
- mirascope/api/_generated/reference.md +72 -0
- mirascope/llm/__init__.py +19 -0
- mirascope/llm/calls/decorator.py +17 -24
- mirascope/llm/formatting/__init__.py +2 -2
- mirascope/llm/formatting/format.py +2 -4
- mirascope/llm/formatting/types.py +19 -2
- mirascope/llm/models/models.py +66 -146
- mirascope/llm/prompts/decorator.py +5 -16
- mirascope/llm/prompts/prompts.py +5 -13
- mirascope/llm/providers/anthropic/_utils/beta_decode.py +22 -7
- mirascope/llm/providers/anthropic/_utils/beta_encode.py +22 -16
- mirascope/llm/providers/anthropic/_utils/decode.py +45 -7
- mirascope/llm/providers/anthropic/_utils/encode.py +28 -15
- mirascope/llm/providers/anthropic/beta_provider.py +33 -69
- mirascope/llm/providers/anthropic/provider.py +52 -91
- mirascope/llm/providers/base/_utils.py +4 -9
- mirascope/llm/providers/base/base_provider.py +89 -205
- mirascope/llm/providers/google/_utils/decode.py +51 -1
- mirascope/llm/providers/google/_utils/encode.py +38 -21
- mirascope/llm/providers/google/provider.py +33 -69
- mirascope/llm/providers/mirascope/provider.py +25 -61
- mirascope/llm/providers/mlx/encoding/base.py +3 -6
- mirascope/llm/providers/mlx/encoding/transformers.py +4 -8
- mirascope/llm/providers/mlx/mlx.py +9 -21
- mirascope/llm/providers/mlx/provider.py +33 -69
- mirascope/llm/providers/openai/completions/_utils/encode.py +39 -20
- mirascope/llm/providers/openai/completions/base_provider.py +34 -75
- mirascope/llm/providers/openai/provider.py +25 -61
- mirascope/llm/providers/openai/responses/_utils/decode.py +31 -2
- mirascope/llm/providers/openai/responses/_utils/encode.py +32 -17
- mirascope/llm/providers/openai/responses/provider.py +34 -75
- mirascope/llm/responses/__init__.py +2 -1
- mirascope/llm/responses/base_stream_response.py +4 -0
- mirascope/llm/responses/response.py +8 -12
- mirascope/llm/responses/stream_response.py +8 -12
- mirascope/llm/responses/usage.py +44 -0
- mirascope/llm/tools/__init__.py +24 -0
- mirascope/llm/tools/provider_tools.py +18 -0
- mirascope/llm/tools/tool_schema.py +4 -2
- mirascope/llm/tools/toolkit.py +24 -6
- mirascope/llm/tools/types.py +112 -0
- mirascope/llm/tools/web_search_tool.py +32 -0
- mirascope/ops/__init__.py +19 -1
- mirascope/ops/_internal/instrumentation/__init__.py +20 -0
- mirascope/ops/_internal/instrumentation/llm/common.py +19 -49
- mirascope/ops/_internal/instrumentation/llm/model.py +61 -82
- mirascope/ops/_internal/instrumentation/llm/serialize.py +36 -12
- mirascope/ops/_internal/instrumentation/providers/__init__.py +29 -0
- mirascope/ops/_internal/instrumentation/providers/anthropic.py +78 -0
- mirascope/ops/_internal/instrumentation/providers/base.py +179 -0
- mirascope/ops/_internal/instrumentation/providers/google_genai.py +85 -0
- mirascope/ops/_internal/instrumentation/providers/openai.py +82 -0
- {mirascope-2.0.2.dist-info → mirascope-2.1.0.dist-info}/METADATA +96 -68
- {mirascope-2.0.2.dist-info → mirascope-2.1.0.dist-info}/RECORD +64 -54
- {mirascope-2.0.2.dist-info → mirascope-2.1.0.dist-info}/WHEEL +0 -0
- {mirascope-2.0.2.dist-info → mirascope-2.1.0.dist-info}/licenses/LICENSE +0 -0
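Most of the churn in the two Anthropic provider files below comes from a single signature migration repeated across every call/stream variant: the optional `tools` parameter becomes a required `toolkit` argument (`Toolkit`, `AsyncToolkit`, `ContextToolkit[DepsT]`, or `AsyncContextToolkit[DepsT]`, matching the variant), and the repeated `type[FormattableT] | Format[FormattableT] | OutputParser[FormattableT] | None` union for `format` collapses into `FormatSpec[FormattableT] | None`. A minimal sketch of what that migration looks like at a call site; `FakeProvider`, `Invoice`, and the `Toolkit` constructor here are illustrative stand-ins, not mirascope's actual API:

```python
from dataclasses import dataclass, field
from typing import Any


@dataclass
class Toolkit:  # stand-in: 2.1.0 providers always receive a toolkit
    tools: list[Any] = field(default_factory=list)


@dataclass
class Invoice:  # stand-in structured-output type
    total: float


class FakeProvider:
    # 2.0.2 shape: tools optional, format a three-way union (or None).
    def call_v202(self, *, model_id: str, messages: list[Any],
                  tools: Any = None, format: Any = None) -> None: ...

    # 2.1.0 shape: toolkit required, format is FormatSpec[...] | None.
    def call_v210(self, *, model_id: str, messages: list[Any],
                  toolkit: Toolkit, format: Any = None) -> None: ...


provider = FakeProvider()
provider.call_v202(model_id="claude-sonnet-4-5", messages=[], format=Invoice)
provider.call_v210(
    model_id="claude-sonnet-4-5",
    messages=[],
    toolkit=Toolkit(),  # passed even when no tools are registered
    format=Invoice,
)
```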
--- mirascope-2.0.2/mirascope/llm/providers/anthropic/beta_provider.py
+++ mirascope-2.1.0/mirascope/llm/providers/anthropic/beta_provider.py

@@ -9,7 +9,7 @@ from typing_extensions import Unpack
 from anthropic import Anthropic, AsyncAnthropic

 from ...context import Context, DepsT
-from ...formatting import
+from ...formatting import FormatSpec, FormattableT
 from ...messages import Message
 from ...responses import (
     AsyncContextResponse,
@@ -22,13 +22,9 @@ from ...responses import (
     StreamResponse,
 )
 from ...tools import (
-    AsyncContextTool,
     AsyncContextToolkit,
-    AsyncTool,
     AsyncToolkit,
-    ContextTool,
     ContextToolkit,
-    Tool,
     Toolkit,
 )
 from ..base import BaseProvider
@@ -63,18 +59,15 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
         *,
         model_id: str,
         messages: Sequence[Message],
-
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: Toolkit,
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> Response | Response[FormattableT]:
         """Generate an `llm.Response` using the beta Anthropic API."""
         input_messages, resolved_format, kwargs = beta_encode.beta_encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -89,7 +82,7 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             assistant_message=assistant_message,
             finish_reason=finish_reason,
@@ -103,20 +96,15 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
         ctx: Context[DepsT],
         model_id: str,
         messages: Sequence[Message],
-
-
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: ContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextResponse` using the beta Anthropic API."""
         input_messages, resolved_format, kwargs = beta_encode.beta_encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -131,7 +119,7 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             assistant_message=assistant_message,
             finish_reason=finish_reason,
@@ -144,18 +132,15 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
         *,
         model_id: str,
         messages: Sequence[Message],
-
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: AsyncToolkit,
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> AsyncResponse | AsyncResponse[FormattableT]:
         """Generate an `llm.AsyncResponse` using the beta Anthropic API."""
         input_messages, resolved_format, kwargs = beta_encode.beta_encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -170,7 +155,7 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             assistant_message=assistant_message,
             finish_reason=finish_reason,
@@ -184,20 +169,15 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
         ctx: Context[DepsT],
         model_id: str,
         messages: Sequence[Message],
-
-
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: AsyncContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
         """Generate an `llm.AsyncContextResponse` using the beta Anthropic API."""
         input_messages, resolved_format, kwargs = beta_encode.beta_encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -212,7 +192,7 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             assistant_message=assistant_message,
             finish_reason=finish_reason,
@@ -225,18 +205,15 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
         *,
         model_id: str,
         messages: Sequence[Message],
-
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: Toolkit,
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> StreamResponse | StreamResponse[FormattableT]:
         """Generate an `llm.StreamResponse` using the beta Anthropic API."""
         input_messages, resolved_format, kwargs = beta_encode.beta_encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -250,7 +227,7 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             chunk_iterator=chunk_iterator,
             format=resolved_format,
@@ -262,20 +239,15 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
         ctx: Context[DepsT],
         model_id: str,
         messages: Sequence[Message],
-
-
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: ContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextStreamResponse` using the beta Anthropic API."""
         input_messages, resolved_format, kwargs = beta_encode.beta_encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -289,7 +261,7 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             chunk_iterator=chunk_iterator,
             format=resolved_format,
@@ -300,18 +272,15 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
         *,
         model_id: str,
         messages: Sequence[Message],
-
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: AsyncToolkit,
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
         """Generate an `llm.AsyncStreamResponse` using the beta Anthropic API."""
         input_messages, resolved_format, kwargs = beta_encode.beta_encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -325,7 +294,7 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             chunk_iterator=chunk_iterator,
             format=resolved_format,
@@ -337,13 +306,8 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
         ctx: Context[DepsT],
         model_id: str,
         messages: Sequence[Message],
-
-
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: AsyncContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> (
         AsyncContextStreamResponse[DepsT]
@@ -353,7 +317,7 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
         input_messages, resolved_format, kwargs = beta_encode.beta_encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -367,7 +331,7 @@ class AnthropicBetaProvider(BaseProvider[Anthropic]):
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             chunk_iterator=chunk_iterator,
             format=resolved_format,
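`provider.py`, next, repeats the same substitution and additionally threads the toolkit through its beta-API routing. The `FormatSpec` alias that replaces the old union is imported from `...formatting` but never defined in this diff; a plausible reading, assumed purely from the `type[...] | Format[...] | OutputParser[...] | None` → `FormatSpec[...] | None` substitution, is a named generic alias for that union:

```python
# Assumed reconstruction of FormatSpec for illustration only; Format and
# OutputParser are stand-ins, and mirascope's real definition may differ.
from typing import Generic, TypeVar, Union

FormattableT = TypeVar("FormattableT")


class Format(Generic[FormattableT]): ...


class OutputParser(Generic[FormattableT]): ...


# One name for the union that 2.0.2 spelled out in every signature.
FormatSpec = Union[
    type[FormattableT],
    Format[FormattableT],
    OutputParser[FormattableT],
]


def call(format: FormatSpec[str] | None = None) -> None: ...


call(format=str)  # a bare type is still an accepted format spec
```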
--- mirascope-2.0.2/mirascope/llm/providers/anthropic/provider.py
+++ mirascope-2.1.0/mirascope/llm/providers/anthropic/provider.py

@@ -9,7 +9,7 @@ from typing_extensions import Unpack
 from anthropic import Anthropic, AsyncAnthropic

 from ...context import Context, DepsT
-from ...formatting import
+from ...formatting import FormatSpec, FormattableT, resolve_format
 from ...messages import Message
 from ...responses import (
     AsyncContextResponse,
@@ -23,14 +23,10 @@ from ...responses import (
 )
 from ...tools import (
     AnyToolSchema,
-    AsyncContextTool,
     AsyncContextToolkit,
-    AsyncTool,
     AsyncToolkit,
     BaseToolkit,
-    ContextTool,
     ContextToolkit,
-    Tool,
     Toolkit,
 )
 from ..base import BaseProvider, _utils as _base_utils
@@ -45,11 +41,8 @@ if TYPE_CHECKING:

 def _should_use_beta(
     model_id: AnthropicModelId,
-    format: type[FormattableT]
-    | Format[FormattableT]
-    | OutputParser[FormattableT]
-    | None,
-    tools: Sequence[AnyToolSchema] | BaseToolkit[AnyToolSchema] | None,
+    format: FormatSpec[FormattableT] | None,
+    tools: BaseToolkit[AnyToolSchema],
 ) -> bool:
     """Determine whether to use the beta API based on format mode or strict tools.

@@ -65,7 +58,7 @@ def _should_use_beta(
     if resolved is not None and resolved.mode == "strict":
         return True

-    return _base_utils.has_strict_tools(tools)
+    return _base_utils.has_strict_tools(tools.tools)


 class AnthropicProvider(BaseProvider[Anthropic]):
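The two hunks above are the routing core of this file: a request is redirected to the beta provider when the resolved format requires strict mode, or when the toolkit contains any strict tool. Because `tools` is now always a `BaseToolkit` rather than `Sequence | Toolkit | None`, the strict-tool check reads the toolkit's `.tools` sequence. A self-contained sketch of that decision (the real function also receives `model_id`, omitted here); the `resolve_format` and `has_strict_tools` stubs are assumptions about helpers whose bodies this diff does not show:

```python
from dataclasses import dataclass, field
from typing import Any, Sequence


@dataclass
class ResolvedFormat:  # stand-in: only the attribute this check reads
    mode: str  # e.g. "strict" vs. some non-strict mode


@dataclass
class BaseToolkit:  # stand-in: a toolkit always wraps a tool sequence
    tools: Sequence[Any] = field(default_factory=list)


def resolve_format(format: Any) -> ResolvedFormat | None:  # stub
    return None if format is None else ResolvedFormat(mode="strict")


def has_strict_tools(tools: Sequence[Any]) -> bool:  # stub
    return any(getattr(tool, "strict", False) for tool in tools)


def should_use_beta(format: Any, toolkit: BaseToolkit) -> bool:
    """Mirror the diff's decision: beta API for strict format or tools."""
    resolved = resolve_format(format)
    if resolved is not None and resolved.mode == "strict":
        return True
    # 2.1.0 always receives a toolkit, so the check walks toolkit.tools
    # instead of special-casing a raw sequence or None.
    return has_strict_tools(toolkit.tools)
```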
@@ -93,19 +86,16 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         *,
         model_id: AnthropicModelId,
         messages: Sequence[Message],
-
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: Toolkit,
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> Response | Response[FormattableT]:
         """Generate an `llm.Response` by synchronously calling the Anthropic Messages API."""
-        if _should_use_beta(model_id, format,
+        if _should_use_beta(model_id, format, toolkit):
             return self._beta_provider.call(
                 model_id=model_id,
                 messages=messages,
-
+                toolkit=toolkit,
                 format=format,
                 **params,
             )
@@ -113,7 +103,7 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         input_messages, resolved_format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -128,7 +118,7 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             assistant_message=assistant_message,
             finish_reason=finish_reason,
@@ -142,22 +132,17 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         ctx: Context[DepsT],
         model_id: AnthropicModelId,
         messages: Sequence[Message],
-
-
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: ContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextResponse` by synchronously calling the Anthropic Messages API."""
-        if _should_use_beta(model_id, format,
+        if _should_use_beta(model_id, format, toolkit):
             return self._beta_provider.context_call(
                 ctx=ctx,
                 model_id=model_id,
                 messages=messages,
-
+                toolkit=toolkit,
                 format=format,
                 **params,
             )
@@ -165,7 +150,7 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         input_messages, resolved_format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -180,7 +165,7 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             assistant_message=assistant_message,
             finish_reason=finish_reason,
@@ -193,19 +178,16 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         *,
         model_id: AnthropicModelId,
         messages: Sequence[Message],
-
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: AsyncToolkit,
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> AsyncResponse | AsyncResponse[FormattableT]:
         """Generate an `llm.AsyncResponse` by asynchronously calling the Anthropic Messages API."""
-        if _should_use_beta(model_id, format,
+        if _should_use_beta(model_id, format, toolkit):
             return await self._beta_provider.call_async(
                 model_id=model_id,
                 messages=messages,
-
+                toolkit=toolkit,
                 format=format,
                 **params,
             )
@@ -213,7 +195,7 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         input_messages, resolved_format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -228,7 +210,7 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             assistant_message=assistant_message,
             finish_reason=finish_reason,
@@ -242,22 +224,17 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         ctx: Context[DepsT],
         model_id: AnthropicModelId,
         messages: Sequence[Message],
-
-
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: AsyncContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
         """Generate an `llm.AsyncContextResponse` by asynchronously calling the Anthropic Messages API."""
-        if _should_use_beta(model_id, format,
+        if _should_use_beta(model_id, format, toolkit):
             return await self._beta_provider.context_call_async(
                 ctx=ctx,
                 model_id=model_id,
                 messages=messages,
-
+                toolkit=toolkit,
                 format=format,
                 **params,
             )
@@ -265,7 +242,7 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         input_messages, resolved_format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -280,7 +257,7 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             assistant_message=assistant_message,
             finish_reason=finish_reason,
@@ -293,19 +270,16 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         *,
         model_id: AnthropicModelId,
         messages: Sequence[Message],
-
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: Toolkit,
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> StreamResponse | StreamResponse[FormattableT]:
         """Generate an `llm.StreamResponse` by synchronously streaming from the Anthropic Messages API."""
-        if _should_use_beta(model_id, format,
+        if _should_use_beta(model_id, format, toolkit):
             return self._beta_provider.stream(
                 model_id=model_id,
                 messages=messages,
-
+                toolkit=toolkit,
                 format=format,
                 **params,
             )
@@ -313,7 +287,7 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         input_messages, resolved_format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -327,7 +301,7 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             chunk_iterator=chunk_iterator,
             format=resolved_format,
@@ -339,22 +313,17 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         ctx: Context[DepsT],
         model_id: AnthropicModelId,
         messages: Sequence[Message],
-
-
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: ContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextStreamResponse` by synchronously streaming from the Anthropic Messages API."""
-        if _should_use_beta(model_id, format,
+        if _should_use_beta(model_id, format, toolkit):
             return self._beta_provider.context_stream(
                 ctx=ctx,
                 model_id=model_id,
                 messages=messages,
-
+                toolkit=toolkit,
                 format=format,
                 **params,
             )
@@ -362,7 +331,7 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         input_messages, resolved_format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -376,7 +345,7 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             chunk_iterator=chunk_iterator,
             format=resolved_format,
@@ -387,26 +356,23 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         *,
         model_id: AnthropicModelId,
         messages: Sequence[Message],
-
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: AsyncToolkit,
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
         """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the Anthropic Messages API."""
-        if _should_use_beta(model_id, format,
+        if _should_use_beta(model_id, format, toolkit):
             return await self._beta_provider.stream_async(
                 model_id=model_id,
                 messages=messages,
-
+                toolkit=toolkit,
                 format=format,
                 **params,
             )
         input_messages, resolved_format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -420,7 +386,7 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             chunk_iterator=chunk_iterator,
             format=resolved_format,
@@ -432,25 +398,20 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         ctx: Context[DepsT],
         model_id: AnthropicModelId,
         messages: Sequence[Message],
-
-
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: AsyncContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> (
         AsyncContextStreamResponse[DepsT]
         | AsyncContextStreamResponse[DepsT, FormattableT]
     ):
         """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from the Anthropic Messages API."""
-        if _should_use_beta(model_id, format,
+        if _should_use_beta(model_id, format, toolkit):
             return await self._beta_provider.context_stream_async(
                 ctx=ctx,
                 model_id=model_id,
                 messages=messages,
-
+                toolkit=toolkit,
                 format=format,
                 **params,
             )
@@ -458,7 +419,7 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         input_messages, resolved_format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -472,7 +433,7 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=
+            tools=toolkit,
             input_messages=input_messages,
             chunk_iterator=chunk_iterator,
             format=resolved_format,