mirascope 2.0.0a3__py3-none-any.whl → 2.0.0a5__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries, and is provided for informational purposes only.
- mirascope/api/_generated/__init__.py +78 -6
- mirascope/api/_generated/api_keys/__init__.py +7 -0
- mirascope/api/_generated/api_keys/client.py +453 -0
- mirascope/api/_generated/api_keys/raw_client.py +853 -0
- mirascope/api/_generated/api_keys/types/__init__.py +9 -0
- mirascope/api/_generated/api_keys/types/api_keys_create_response.py +36 -0
- mirascope/api/_generated/api_keys/types/api_keys_get_response.py +35 -0
- mirascope/api/_generated/api_keys/types/api_keys_list_response_item.py +35 -0
- mirascope/api/_generated/client.py +14 -0
- mirascope/api/_generated/environments/__init__.py +17 -0
- mirascope/api/_generated/environments/client.py +532 -0
- mirascope/api/_generated/environments/raw_client.py +1088 -0
- mirascope/api/_generated/environments/types/__init__.py +15 -0
- mirascope/api/_generated/environments/types/environments_create_response.py +26 -0
- mirascope/api/_generated/environments/types/environments_get_response.py +26 -0
- mirascope/api/_generated/environments/types/environments_list_response_item.py +26 -0
- mirascope/api/_generated/environments/types/environments_update_response.py +26 -0
- mirascope/api/_generated/errors/__init__.py +11 -1
- mirascope/api/_generated/errors/conflict_error.py +15 -0
- mirascope/api/_generated/errors/forbidden_error.py +15 -0
- mirascope/api/_generated/errors/internal_server_error.py +15 -0
- mirascope/api/_generated/errors/not_found_error.py +15 -0
- mirascope/api/_generated/organizations/__init__.py +25 -0
- mirascope/api/_generated/organizations/client.py +404 -0
- mirascope/api/_generated/organizations/raw_client.py +902 -0
- mirascope/api/_generated/organizations/types/__init__.py +23 -0
- mirascope/api/_generated/organizations/types/organizations_create_response.py +25 -0
- mirascope/api/_generated/organizations/types/organizations_create_response_role.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_get_response.py +25 -0
- mirascope/api/_generated/organizations/types/organizations_get_response_role.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_list_response_item.py +25 -0
- mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +7 -0
- mirascope/api/_generated/organizations/types/organizations_update_response.py +25 -0
- mirascope/api/_generated/organizations/types/organizations_update_response_role.py +7 -0
- mirascope/api/_generated/projects/__init__.py +17 -0
- mirascope/api/_generated/projects/client.py +482 -0
- mirascope/api/_generated/projects/raw_client.py +1058 -0
- mirascope/api/_generated/projects/types/__init__.py +15 -0
- mirascope/api/_generated/projects/types/projects_create_response.py +31 -0
- mirascope/api/_generated/projects/types/projects_get_response.py +31 -0
- mirascope/api/_generated/projects/types/projects_list_response_item.py +31 -0
- mirascope/api/_generated/projects/types/projects_update_response.py +31 -0
- mirascope/api/_generated/reference.md +1311 -0
- mirascope/api/_generated/types/__init__.py +20 -4
- mirascope/api/_generated/types/already_exists_error.py +24 -0
- mirascope/api/_generated/types/already_exists_error_tag.py +5 -0
- mirascope/api/_generated/types/database_error.py +24 -0
- mirascope/api/_generated/types/database_error_tag.py +5 -0
- mirascope/api/_generated/types/http_api_decode_error.py +1 -3
- mirascope/api/_generated/types/issue.py +1 -5
- mirascope/api/_generated/types/not_found_error_body.py +24 -0
- mirascope/api/_generated/types/not_found_error_tag.py +5 -0
- mirascope/api/_generated/types/permission_denied_error.py +24 -0
- mirascope/api/_generated/types/permission_denied_error_tag.py +7 -0
- mirascope/api/_generated/types/property_key.py +2 -2
- mirascope/api/_generated/types/{property_key_tag.py → property_key_key.py} +3 -5
- mirascope/api/_generated/types/{property_key_tag_tag.py → property_key_key_tag.py} +1 -1
- mirascope/llm/__init__.py +6 -2
- mirascope/llm/exceptions.py +28 -0
- mirascope/llm/providers/__init__.py +12 -4
- mirascope/llm/providers/anthropic/__init__.py +6 -1
- mirascope/llm/providers/anthropic/_utils/__init__.py +17 -5
- mirascope/llm/providers/anthropic/_utils/beta_decode.py +271 -0
- mirascope/llm/providers/anthropic/_utils/beta_encode.py +216 -0
- mirascope/llm/providers/anthropic/_utils/decode.py +39 -7
- mirascope/llm/providers/anthropic/_utils/encode.py +156 -64
- mirascope/llm/providers/anthropic/_utils/errors.py +46 -0
- mirascope/llm/providers/anthropic/beta_provider.py +328 -0
- mirascope/llm/providers/anthropic/model_id.py +10 -27
- mirascope/llm/providers/anthropic/model_info.py +87 -0
- mirascope/llm/providers/anthropic/provider.py +132 -145
- mirascope/llm/providers/base/__init__.py +2 -1
- mirascope/llm/providers/base/_utils.py +15 -1
- mirascope/llm/providers/base/base_provider.py +173 -58
- mirascope/llm/providers/google/_utils/__init__.py +2 -0
- mirascope/llm/providers/google/_utils/decode.py +55 -3
- mirascope/llm/providers/google/_utils/encode.py +14 -6
- mirascope/llm/providers/google/_utils/errors.py +49 -0
- mirascope/llm/providers/google/model_id.py +7 -13
- mirascope/llm/providers/google/model_info.py +62 -0
- mirascope/llm/providers/google/provider.py +13 -8
- mirascope/llm/providers/mlx/_utils.py +31 -2
- mirascope/llm/providers/mlx/encoding/transformers.py +17 -1
- mirascope/llm/providers/mlx/provider.py +12 -0
- mirascope/llm/providers/ollama/__init__.py +19 -0
- mirascope/llm/providers/ollama/provider.py +71 -0
- mirascope/llm/providers/openai/__init__.py +10 -1
- mirascope/llm/providers/openai/_utils/__init__.py +5 -0
- mirascope/llm/providers/openai/_utils/errors.py +46 -0
- mirascope/llm/providers/openai/completions/__init__.py +6 -1
- mirascope/llm/providers/openai/completions/_utils/decode.py +57 -5
- mirascope/llm/providers/openai/completions/_utils/encode.py +9 -8
- mirascope/llm/providers/openai/completions/base_provider.py +513 -0
- mirascope/llm/providers/openai/completions/provider.py +13 -447
- mirascope/llm/providers/openai/model_info.py +57 -0
- mirascope/llm/providers/openai/provider.py +30 -5
- mirascope/llm/providers/openai/responses/_utils/decode.py +55 -4
- mirascope/llm/providers/openai/responses/_utils/encode.py +9 -9
- mirascope/llm/providers/openai/responses/provider.py +33 -28
- mirascope/llm/providers/provider_id.py +11 -1
- mirascope/llm/providers/provider_registry.py +59 -4
- mirascope/llm/providers/together/__init__.py +19 -0
- mirascope/llm/providers/together/provider.py +40 -0
- mirascope/llm/responses/__init__.py +3 -0
- mirascope/llm/responses/base_response.py +4 -0
- mirascope/llm/responses/base_stream_response.py +25 -1
- mirascope/llm/responses/finish_reason.py +1 -0
- mirascope/llm/responses/response.py +9 -0
- mirascope/llm/responses/root_response.py +5 -1
- mirascope/llm/responses/usage.py +95 -0
- mirascope/ops/_internal/closure.py +62 -11
- {mirascope-2.0.0a3.dist-info → mirascope-2.0.0a5.dist-info}/METADATA +3 -3
- {mirascope-2.0.0a3.dist-info → mirascope-2.0.0a5.dist-info}/RECORD +115 -56
- mirascope/llm/providers/load_provider.py +0 -48
- mirascope/llm/providers/openai/shared/__init__.py +0 -7
- mirascope/llm/providers/openai/shared/_utils.py +0 -59
- {mirascope-2.0.0a3.dist-info → mirascope-2.0.0a5.dist-info}/WHEEL +0 -0
- {mirascope-2.0.0a3.dist-info → mirascope-2.0.0a5.dist-info}/licenses/LICENSE +0 -0
```diff
--- a/mirascope/llm/providers/anthropic/provider.py
+++ b/mirascope/llm/providers/anthropic/provider.py
@@ -6,7 +6,7 @@ from typing_extensions import Unpack
 from anthropic import Anthropic, AsyncAnthropic
 
 from ...context import Context, DepsT
-from ...formatting import Format, FormattableT
+from ...formatting import Format, FormattableT, resolve_format
 from ...messages import Message
 from ...responses import (
     AsyncContextResponse,
@@ -30,7 +30,24 @@ from ...tools import (
 )
 from ..base import BaseProvider, Params
 from . import _utils
+from .beta_provider import AnthropicBetaProvider
 from .model_id import AnthropicModelId, model_name
+from .model_info import MODELS_WITHOUT_STRICT_STRUCTURED_OUTPUTS
+
+
+def _should_use_beta(
+    model_id: AnthropicModelId,
+    format: type[FormattableT] | Format[FormattableT] | None,
+) -> bool:
+    """Determine whether to use the beta API based on format mode.
+
+    If the format resolves to strict mode, and the model plausibly has
+    strict structured output support, then we will use the beta provider.
+    """
+    resolved = resolve_format(format, default_mode=_utils.DEFAULT_FORMAT_MODE)
+    if resolved is None or resolved.mode != "strict":
+        return False
+    return model_name(model_id) not in MODELS_WITHOUT_STRICT_STRUCTURED_OUTPUTS
 
 
 class AnthropicProvider(BaseProvider[Anthropic]):
```
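To make the gate above concrete, here is a minimal self-contained sketch of the same decision rule: a request only routes to the beta API when its format resolves to strict mode and the model is not on the opt-out list. `StubFormat`, the stub `resolve_format`, and the model set are illustrative stand-ins, not mirascope's real implementations.

```python
# Illustrative sketch of the _should_use_beta rule; StubFormat, resolve_format,
# and the model set below are assumptions, not mirascope's real implementations.
from dataclasses import dataclass


@dataclass
class StubFormat:
    mode: str  # e.g. "strict", "json", or "tool"


# Models assumed (for illustration only) to lack strict structured outputs.
MODELS_WITHOUT_STRICT_STRUCTURED_OUTPUTS = {"claude-3-haiku-20240307"}


def resolve_format(format: StubFormat | None, default_mode: str) -> StubFormat | None:
    # The real resolve_format also accepts format *types* and applies defaults.
    return format


def should_use_beta(model_name: str, format: StubFormat | None) -> bool:
    resolved = resolve_format(format, default_mode="json")
    if resolved is None or resolved.mode != "strict":
        return False  # non-strict requests stay on the stable Messages API
    return model_name not in MODELS_WITHOUT_STRICT_STRUCTURED_OUTPUTS


assert should_use_beta("claude-sonnet-4-5", StubFormat(mode="strict"))
assert not should_use_beta("claude-sonnet-4-5", StubFormat(mode="json"))
assert not should_use_beta("claude-3-haiku-20240307", StubFormat(mode="strict"))
```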
mirascope/llm/providers/anthropic/provider.py (continued):

```diff
@@ -38,6 +55,8 @@ class AnthropicProvider(BaseProvider[Anthropic]):
 
     id = "anthropic"
     default_scope = "anthropic/"
+    error_map = _utils.ANTHROPIC_ERROR_MAP
+    _beta_provider: AnthropicBetaProvider
 
     def __init__(
         self, *, api_key: str | None = None, base_url: str | None = None
@@ -45,6 +64,11 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         """Initialize the Anthropic client."""
         self.client = Anthropic(api_key=api_key, base_url=base_url)
         self.async_client = AsyncAnthropic(api_key=api_key, base_url=base_url)
+        self._beta_provider = AnthropicBetaProvider(api_key=api_key, base_url=base_url)
+
+    def get_error_status(self, e: Exception) -> int | None:
+        """Extract HTTP status code from Anthropic exception."""
+        return getattr(e, "status_code", None)
 
     def _call(
         self,
```
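The new `error_map` class attribute and `get_error_status` hook suggest the shared base provider now translates provider SDK exceptions into common exception types keyed by HTTP status (`mirascope/llm/exceptions.py` is new in this release). A rough sketch of that pattern, with stand-in exception classes, since the actual `ProviderErrorMap` shape is not visible in this diff:

```python
# Sketch of status-code-based error mapping; the exception classes and the map
# shape are stand-ins (assumptions), not mirascope's actual ProviderErrorMap.
class ProviderError(Exception): ...


class AuthenticationError(ProviderError): ...


class RateLimitError(ProviderError): ...


ANTHROPIC_ERROR_MAP: dict[int, type[ProviderError]] = {
    401: AuthenticationError,
    429: RateLimitError,
}


def get_error_status(e: Exception) -> int | None:
    # Anthropic SDK API errors carry a `status_code` attribute.
    return getattr(e, "status_code", None)


def translate_error(e: Exception) -> Exception:
    status = get_error_status(e)
    mapped = ANTHROPIC_ERROR_MAP.get(status) if status is not None else None
    return mapped(str(e)) if mapped is not None else e
```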
mirascope/llm/providers/anthropic/provider.py (continued) — each call and stream variant now short-circuits to the beta provider for strict formats, unpacks the new `usage` value from `decode_response`, and passes `resolved_format` through instead of rebinding the `format` parameter:

```diff
@@ -55,32 +79,27 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         format: type[FormattableT] | Format[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> Response | Response[FormattableT]:
-        """Generate an `llm.Response` by synchronously calling the Anthropic Messages API.
-
-
-
-
-
-
-
-
-
-        """
-        input_messages, format, kwargs = _utils.encode_request(
+        """Generate an `llm.Response` by synchronously calling the Anthropic Messages API."""
+        if _should_use_beta(model_id, format):
+            return self._beta_provider.call(
+                model_id=model_id,
+                messages=messages,
+                tools=tools,
+                format=format,
+                **params,
+            )
+
+        input_messages, resolved_format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
             tools=tools,
             format=format,
             params=params,
         )
-
         anthropic_response = self.client.messages.create(**kwargs)
-
-        assistant_message, finish_reason = _utils.decode_response(
+        assistant_message, finish_reason, usage = _utils.decode_response(
             anthropic_response, model_id
         )
-
         return Response(
             raw=anthropic_response,
             provider_id="anthropic",
@@ -91,7 +110,8 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             input_messages=input_messages,
             assistant_message=assistant_message,
             finish_reason=finish_reason,
-            format=format,
+            usage=usage,
+            format=resolved_format,
         )
 
     def _context_call(
@@ -106,33 +126,28 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         format: type[FormattableT] | Format[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
-        """Generate an `llm.ContextResponse` by synchronously calling the Anthropic Messages API.
-
-
-
-
-
-
-
-
-
-
-        """
-        input_messages, format, kwargs = _utils.encode_request(
+        """Generate an `llm.ContextResponse` by synchronously calling the Anthropic Messages API."""
+        if _should_use_beta(model_id, format):
+            return self._beta_provider.context_call(
+                ctx=ctx,
+                model_id=model_id,
+                messages=messages,
+                tools=tools,
+                format=format,
+                **params,
+            )
+
+        input_messages, resolved_format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
             tools=tools,
             format=format,
             params=params,
         )
-
         anthropic_response = self.client.messages.create(**kwargs)
-
-        assistant_message, finish_reason = _utils.decode_response(
+        assistant_message, finish_reason, usage = _utils.decode_response(
             anthropic_response, model_id
         )
-
         return ContextResponse(
             raw=anthropic_response,
             provider_id="anthropic",
@@ -143,7 +158,8 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             input_messages=input_messages,
             assistant_message=assistant_message,
             finish_reason=finish_reason,
-            format=format,
+            usage=usage,
+            format=resolved_format,
         )
 
     async def _call_async(
@@ -155,32 +171,27 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         format: type[FormattableT] | Format[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> AsyncResponse | AsyncResponse[FormattableT]:
-        """Generate an `llm.AsyncResponse` by asynchronously calling the Anthropic Messages API.
-
-
-
-
-
-
-
-
-
-        """
-        input_messages, format, kwargs = _utils.encode_request(
+        """Generate an `llm.AsyncResponse` by asynchronously calling the Anthropic Messages API."""
+        if _should_use_beta(model_id, format):
+            return await self._beta_provider.call_async(
+                model_id=model_id,
+                messages=messages,
+                tools=tools,
+                format=format,
+                **params,
+            )
+
+        input_messages, resolved_format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
             tools=tools,
             format=format,
             params=params,
         )
-
         anthropic_response = await self.async_client.messages.create(**kwargs)
-
-        assistant_message, finish_reason = _utils.decode_response(
+        assistant_message, finish_reason, usage = _utils.decode_response(
             anthropic_response, model_id
         )
-
         return AsyncResponse(
             raw=anthropic_response,
             provider_id="anthropic",
@@ -191,7 +202,8 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             input_messages=input_messages,
             assistant_message=assistant_message,
             finish_reason=finish_reason,
-            format=format,
+            usage=usage,
+            format=resolved_format,
         )
 
     async def _context_call_async(
@@ -206,33 +218,28 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         format: type[FormattableT] | Format[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
-        """Generate an `llm.AsyncContextResponse` by asynchronously calling the Anthropic Messages API.
-
-
-
-
-
-
-
-
-
-
-        """
-        input_messages, format, kwargs = _utils.encode_request(
+        """Generate an `llm.AsyncContextResponse` by asynchronously calling the Anthropic Messages API."""
+        if _should_use_beta(model_id, format):
+            return await self._beta_provider.context_call_async(
+                ctx=ctx,
+                model_id=model_id,
+                messages=messages,
+                tools=tools,
+                format=format,
+                **params,
+            )
+
+        input_messages, resolved_format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
             tools=tools,
             format=format,
             params=params,
         )
-
         anthropic_response = await self.async_client.messages.create(**kwargs)
-
-        assistant_message, finish_reason = _utils.decode_response(
+        assistant_message, finish_reason, usage = _utils.decode_response(
             anthropic_response, model_id
         )
-
         return AsyncContextResponse(
             raw=anthropic_response,
             provider_id="anthropic",
@@ -243,7 +250,8 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             input_messages=input_messages,
             assistant_message=assistant_message,
             finish_reason=finish_reason,
-            format=format,
+            usage=usage,
+            format=resolved_format,
         )
 
     def _stream(
@@ -255,30 +263,25 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         format: type[FormattableT] | Format[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> StreamResponse | StreamResponse[FormattableT]:
-        """Generate an `llm.StreamResponse` by synchronously streaming from the Anthropic Messages API.
-
-
-
-
-
-
-
-
-
-        """
-        input_messages, format, kwargs = _utils.encode_request(
+        """Generate an `llm.StreamResponse` by synchronously streaming from the Anthropic Messages API."""
+        if _should_use_beta(model_id, format):
+            return self._beta_provider.stream(
+                model_id=model_id,
+                messages=messages,
+                tools=tools,
+                format=format,
+                **params,
+            )
+
+        input_messages, resolved_format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
             tools=tools,
             format=format,
             params=params,
         )
-
         anthropic_stream = self.client.messages.stream(**kwargs)
-
         chunk_iterator = _utils.decode_stream(anthropic_stream)
-
         return StreamResponse(
             provider_id="anthropic",
             model_id=model_id,
@@ -287,7 +290,7 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             tools=tools,
             input_messages=input_messages,
             chunk_iterator=chunk_iterator,
-            format=format,
+            format=resolved_format,
         )
 
     def _context_stream(
@@ -302,31 +305,26 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         format: type[FormattableT] | Format[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
-        """Generate an `llm.ContextStreamResponse` by synchronously streaming from the Anthropic Messages API.
-
-
-
-
-
-
-
-
-
-
-        """
-        input_messages, format, kwargs = _utils.encode_request(
+        """Generate an `llm.ContextStreamResponse` by synchronously streaming from the Anthropic Messages API."""
+        if _should_use_beta(model_id, format):
+            return self._beta_provider.context_stream(
+                ctx=ctx,
+                model_id=model_id,
+                messages=messages,
+                tools=tools,
+                format=format,
+                **params,
+            )
+
+        input_messages, resolved_format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
             tools=tools,
             format=format,
             params=params,
         )
-
         anthropic_stream = self.client.messages.stream(**kwargs)
-
         chunk_iterator = _utils.decode_stream(anthropic_stream)
-
         return ContextStreamResponse(
             provider_id="anthropic",
             model_id=model_id,
@@ -335,7 +333,7 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             tools=tools,
             input_messages=input_messages,
             chunk_iterator=chunk_iterator,
-            format=format,
+            format=resolved_format,
         )
 
     async def _stream_async(
@@ -347,30 +345,24 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         format: type[FormattableT] | Format[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
-        """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the Anthropic Messages API.
-
-
-
-
-
-
-
-
-
-        An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
-        """
-        input_messages, format, kwargs = _utils.encode_request(
+        """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the Anthropic Messages API."""
+        if _should_use_beta(model_id, format):
+            return await self._beta_provider.stream_async(
+                model_id=model_id,
+                messages=messages,
+                tools=tools,
+                format=format,
+                **params,
+            )
+        input_messages, resolved_format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
             tools=tools,
             format=format,
             params=params,
         )
-
         anthropic_stream = self.async_client.messages.stream(**kwargs)
-
         chunk_iterator = _utils.decode_async_stream(anthropic_stream)
-
         return AsyncStreamResponse(
             provider_id="anthropic",
             model_id=model_id,
@@ -379,7 +371,7 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             tools=tools,
             input_messages=input_messages,
             chunk_iterator=chunk_iterator,
-            format=format,
+            format=resolved_format,
         )
 
     async def _context_stream_async(
@@ -397,31 +389,26 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         AsyncContextStreamResponse[DepsT]
         | AsyncContextStreamResponse[DepsT, FormattableT]
     ):
-        """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from the Anthropic Messages API.
-
-
-
-
-
-
-
-
-
-
-        """
-        input_messages, format, kwargs = _utils.encode_request(
+        """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from the Anthropic Messages API."""
+        if _should_use_beta(model_id, format):
+            return await self._beta_provider.context_stream_async(
+                ctx=ctx,
+                model_id=model_id,
+                messages=messages,
+                tools=tools,
+                format=format,
+                **params,
+            )
+
+        input_messages, resolved_format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
             tools=tools,
             format=format,
             params=params,
         )
-
         anthropic_stream = self.async_client.messages.stream(**kwargs)
-
         chunk_iterator = _utils.decode_async_stream(anthropic_stream)
-
         return AsyncContextStreamResponse(
             provider_id="anthropic",
             model_id=model_id,
@@ -430,5 +417,5 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             tools=tools,
             input_messages=input_messages,
             chunk_iterator=chunk_iterator,
-            format=format,
+            format=resolved_format,
         )
```
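Besides the beta routing, the recurring change across these hunks is that `_utils.decode_response` now returns a third `usage` value that is forwarded to every response type. A minimal sketch of what such a decode contract looks like; the `Usage` fields are assumptions (the real type lives in the newly added mirascope/llm/responses/usage.py), while the `input_tokens`/`output_tokens` keys match the usage block Anthropic returns on Messages API responses:

```python
# Sketch of the new three-value decode contract; Usage's fields are assumed,
# modeled on the usage block in raw Anthropic Messages API responses.
from dataclasses import dataclass


@dataclass
class Usage:
    input_tokens: int
    output_tokens: int


def decode_response(raw: dict) -> tuple[str, str, Usage]:
    usage = Usage(
        input_tokens=raw["usage"]["input_tokens"],
        output_tokens=raw["usage"]["output_tokens"],
    )
    return raw["content"], raw["stop_reason"], usage


message, finish_reason, usage = decode_response(
    {
        "content": "Hi!",
        "stop_reason": "end_turn",
        "usage": {"input_tokens": 12, "output_tokens": 3},
    }
)
assert usage.output_tokens == 3
```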
```diff
--- a/mirascope/llm/providers/base/__init__.py
+++ b/mirascope/llm/providers/base/__init__.py
@@ -1,7 +1,7 @@
 """Base client interfaces and types."""
 
 from . import _utils
-from .base_provider import BaseProvider, Provider
+from .base_provider import BaseProvider, Provider, ProviderErrorMap
 from .kwargs import BaseKwargs, KwargsT
 from .params import Params
 
@@ -11,5 +11,6 @@ __all__ = [
     "KwargsT",
     "Params",
     "Provider",
+    "ProviderErrorMap",
     "_utils",
 ]
```
```diff
--- a/mirascope/llm/providers/base/_utils.py
+++ b/mirascope/llm/providers/base/_utils.py
@@ -1,7 +1,7 @@
 import logging
 from collections.abc import Generator, Sequence
 from contextlib import contextmanager
-from typing import TYPE_CHECKING, TypeAlias, get_type_hints
+from typing import TYPE_CHECKING, TypeAlias, cast, get_type_hints
 
 from ...content import Text
 from ...messages import AssistantMessage, Message, SystemMessage, UserMessage
@@ -16,6 +16,20 @@ logger = logging.getLogger(__name__)
 SystemMessageContent: TypeAlias = str | None
 
 
+def ensure_additional_properties_false(obj: object) -> None:
+    """Recursively adds additionalProperties = False to a schema, required for strict mode."""
+    if isinstance(obj, dict):
+        obj = cast(dict[str, object], obj)
+        if obj.get("type") == "object" and "additionalProperties" not in obj:
+            obj["additionalProperties"] = False
+        for value in obj.values():
+            ensure_additional_properties_false(value)
+    elif isinstance(obj, list):
+        obj = cast(list[object], obj)
+        for item in obj:
+            ensure_additional_properties_false(item)
+
+
 def add_system_instructions(
     messages: Sequence[Message], additional_system_instructions: str
 ) -> Sequence[Message]:
```
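Strict structured outputs require every object schema to set `additionalProperties: false`, and the new `ensure_additional_properties_false` helper patches a JSON schema in place to satisfy that. A runnable demonstration, with the function body copied from the hunk above (the `cast` calls, which exist only for type narrowing, are dropped):

```python
# Demo of ensure_additional_properties_false from the hunk above; the function
# body is copied from the diff with the typing-only cast() calls removed.
def ensure_additional_properties_false(obj: object) -> None:
    """Recursively adds additionalProperties = False to a schema, required for strict mode."""
    if isinstance(obj, dict):
        if obj.get("type") == "object" and "additionalProperties" not in obj:
            obj["additionalProperties"] = False
        for value in obj.values():
            ensure_additional_properties_false(value)
    elif isinstance(obj, list):
        for item in obj:
            ensure_additional_properties_false(item)


schema = {
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "tags": {"type": "array", "items": {"type": "object", "properties": {}}},
    },
}
ensure_additional_properties_false(schema)
assert schema["additionalProperties"] is False
assert schema["properties"]["tags"]["items"]["additionalProperties"] is False
```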