pydantic-ai-slim 1.0.10__py3-none-any.whl → 1.0.12__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of pydantic-ai-slim might be problematic.
- pydantic_ai/__init__.py +134 -4
- pydantic_ai/_a2a.py +1 -1
- pydantic_ai/_function_schema.py +18 -10
- pydantic_ai/{profiles/_json_schema.py → _json_schema.py} +5 -3
- pydantic_ai/_output.py +1 -8
- pydantic_ai/_thinking_part.py +1 -1
- pydantic_ai/_utils.py +24 -7
- pydantic_ai/agent/__init__.py +1 -2
- pydantic_ai/builtin_tools.py +20 -1
- pydantic_ai/common_tools/duckduckgo.py +2 -2
- pydantic_ai/common_tools/tavily.py +2 -2
- pydantic_ai/direct.py +4 -4
- pydantic_ai/durable_exec/dbos/_agent.py +1 -1
- pydantic_ai/durable_exec/dbos/_mcp_server.py +1 -2
- pydantic_ai/durable_exec/dbos/_model.py +2 -2
- pydantic_ai/durable_exec/temporal/_agent.py +1 -1
- pydantic_ai/durable_exec/temporal/_function_toolset.py +1 -1
- pydantic_ai/durable_exec/temporal/_mcp_server.py +1 -1
- pydantic_ai/durable_exec/temporal/_model.py +3 -3
- pydantic_ai/durable_exec/temporal/_toolset.py +1 -3
- pydantic_ai/ext/aci.py +1 -1
- pydantic_ai/ext/langchain.py +1 -1
- pydantic_ai/mcp.py +21 -7
- pydantic_ai/messages.py +16 -11
- pydantic_ai/models/__init__.py +3 -82
- pydantic_ai/models/anthropic.py +36 -23
- pydantic_ai/models/bedrock.py +6 -5
- pydantic_ai/models/google.py +2 -2
- pydantic_ai/models/instrumented.py +27 -11
- pydantic_ai/models/openai.py +115 -33
- pydantic_ai/output.py +23 -2
- pydantic_ai/profiles/__init__.py +1 -1
- pydantic_ai/profiles/google.py +1 -1
- pydantic_ai/profiles/harmony.py +3 -1
- pydantic_ai/profiles/openai.py +1 -1
- pydantic_ai/providers/__init__.py +1 -1
- pydantic_ai/providers/anthropic.py +1 -1
- pydantic_ai/providers/azure.py +1 -1
- pydantic_ai/providers/bedrock.py +1 -1
- pydantic_ai/providers/cerebras.py +1 -1
- pydantic_ai/providers/cohere.py +1 -1
- pydantic_ai/providers/deepseek.py +1 -1
- pydantic_ai/providers/fireworks.py +1 -1
- pydantic_ai/providers/github.py +1 -1
- pydantic_ai/providers/google.py +1 -1
- pydantic_ai/providers/google_gla.py +1 -1
- pydantic_ai/providers/google_vertex.py +1 -1
- pydantic_ai/providers/grok.py +1 -1
- pydantic_ai/providers/groq.py +1 -1
- pydantic_ai/providers/heroku.py +1 -1
- pydantic_ai/providers/huggingface.py +1 -1
- pydantic_ai/providers/litellm.py +1 -1
- pydantic_ai/providers/mistral.py +1 -1
- pydantic_ai/providers/moonshotai.py +1 -1
- pydantic_ai/providers/ollama.py +3 -1
- pydantic_ai/providers/openai.py +1 -1
- pydantic_ai/providers/openrouter.py +1 -1
- pydantic_ai/providers/together.py +1 -1
- pydantic_ai/providers/vercel.py +1 -1
- pydantic_ai/toolsets/function.py +1 -2
- {pydantic_ai_slim-1.0.10.dist-info → pydantic_ai_slim-1.0.12.dist-info}/METADATA +6 -6
- pydantic_ai_slim-1.0.12.dist-info/RECORD +127 -0
- pydantic_ai_slim-1.0.10.dist-info/RECORD +0 -127
- {pydantic_ai_slim-1.0.10.dist-info → pydantic_ai_slim-1.0.12.dist-info}/WHEEL +0 -0
- {pydantic_ai_slim-1.0.10.dist-info → pydantic_ai_slim-1.0.12.dist-info}/entry_points.txt +0 -0
- {pydantic_ai_slim-1.0.10.dist-info → pydantic_ai_slim-1.0.12.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/messages.py
CHANGED
@@ -114,6 +114,20 @@ class FileUrl(ABC):

     _: KW_ONLY

+    identifier: str
+    """The identifier of the file, such as a unique ID. generating one from the url if not explicitly set.
+
+    This identifier can be provided to the model in a message to allow it to refer to this file in a tool call argument,
+    and the tool can look up the file in question by iterating over the message history and finding the matching `FileUrl`.
+
+    This identifier is only automatically passed to the model when the `FileUrl` is returned by a tool.
+    If you're passing the `FileUrl` as a user message, it's up to you to include a separate text part with the identifier,
+    e.g. "This is file <identifier>:" preceding the `FileUrl`.
+
+    It's also included in inline-text delimiters for providers that require inlining text documents, so the model can
+    distinguish multiple files.
+    """
+
     force_download: bool = False
     """If the model supports it:

@@ -126,23 +140,13 @@ class FileUrl(ABC):

     Supported by:
     - `GoogleModel`: `VideoUrl.vendor_metadata` is used as `video_metadata`: https://ai.google.dev/gemini-api/docs/video-understanding#customize-video-processing
+    - `OpenAIChatModel`, `OpenAIResponsesModel`: `ImageUrl.vendor_metadata['detail']` is used as `detail` setting for images
     """

     _media_type: Annotated[str | None, pydantic.Field(alias='media_type', default=None, exclude=True)] = field(
         compare=False, default=None
     )

-    identifier: str | None = None
-    """The identifier of the file, such as a unique ID. generating one from the url if not explicitly set
-
-    This identifier can be provided to the model in a message to allow it to refer to this file in a tool call argument,
-    and the tool can look up the file in question by iterating over the message history and finding the matching `FileUrl`.
-
-    This identifier is only automatically passed to the model when the `FileUrl` is returned by a tool.
-    If you're passing the `FileUrl` as a user message, it's up to you to include a separate text part with the identifier,
-    e.g. "This is file <identifier>:" preceding the `FileUrl`.
-    """
-
     def __init__(
         self,
         url: str,

@@ -471,6 +475,7 @@ class BinaryContent:

     Supported by:
     - `GoogleModel`: `BinaryContent.vendor_metadata` is used as `video_metadata`: https://ai.google.dev/gemini-api/docs/video-understanding#customize-video-processing
+    - `OpenAIChatModel`, `OpenAIResponsesModel`: `BinaryContent.vendor_metadata['detail']` is used as `detail` setting for images
     """

     kind: Literal['binary'] = 'binary'
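The net effect of this change is that `identifier` is now a required keyword-only field (with a value derived from the URL when not set explicitly), declared ahead of `force_download`, and image `vendor_metadata['detail']` is now honored by the OpenAI models. Below is a minimal sketch (not part of the diff) of the identifier round-trip the docstring describes, assuming `RunContext.messages` exposes the run's message history as in current releases; the agent, tool name, and lookup logic are illustrative only.

```python
from pydantic_ai import Agent, RunContext
from pydantic_ai.messages import FileUrl, ModelRequest, UserPromptPart

agent = Agent('openai:gpt-4o')  # model string is illustrative


@agent.tool
def describe_file(ctx: RunContext[None], identifier: str) -> str:
    """Look up a previously provided file by the identifier the model was given."""
    for message in ctx.messages:  # assumed: RunContext.messages is the run's history
        if not isinstance(message, ModelRequest):
            continue
        for part in message.parts:
            if not isinstance(part, UserPromptPart) or isinstance(part.content, str):
                continue
            for item in part.content:
                if isinstance(item, FileUrl) and item.identifier == identifier:
                    return f'Found {item.url} (media type: {item.media_type})'
    return f'No file with identifier {identifier!r} found.'
```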
pydantic_ai/models/__init__.py
CHANGED
@@ -20,6 +20,7 @@ import httpx
 from typing_extensions import TypeAliasType, TypedDict

 from .. import _utils
+from .._json_schema import JsonSchemaTransformer
 from .._output import OutputObjectDefinition
 from .._parts_manager import ModelResponsePartsManager
 from .._run_context import RunContext

@@ -40,7 +41,6 @@ from ..messages import (
 )
 from ..output import OutputMode
 from ..profiles import DEFAULT_PROFILE, ModelProfile, ModelProfileSpec
-from ..profiles._json_schema import JsonSchemaTransformer
 from ..settings import ModelSettings
 from ..tools import ToolDefinition
 from ..usage import RequestUsage

@@ -65,6 +65,8 @@ KnownModelName = TypeAliasType(
     'anthropic:claude-opus-4-20250514',
     'anthropic:claude-sonnet-4-0',
     'anthropic:claude-sonnet-4-20250514',
+    'anthropic:claude-sonnet-4-5',
+    'anthropic:claude-sonnet-4-5-20250929',
     'bedrock:amazon.titan-tg1-large',
     'bedrock:amazon.titan-text-lite-v1',
     'bedrock:amazon.titan-text-express-v1',

@@ -121,23 +123,6 @@ KnownModelName = TypeAliasType(
     'cerebras:qwen-3-32b',
     'cerebras:qwen-3-coder-480b',
     'cerebras:qwen-3-235b-a22b-thinking-2507',
-    'claude-3-5-haiku-20241022',
-    'claude-3-5-haiku-latest',
-    'claude-3-5-sonnet-20240620',
-    'claude-3-5-sonnet-20241022',
-    'claude-3-5-sonnet-latest',
-    'claude-3-7-sonnet-20250219',
-    'claude-3-7-sonnet-latest',
-    'claude-3-haiku-20240307',
-    'claude-3-opus-20240229',
-    'claude-3-opus-latest',
-    'claude-4-opus-20250514',
-    'claude-4-sonnet-20250514',
-    'claude-opus-4-0',
-    'claude-opus-4-1-20250805',
-    'claude-opus-4-20250514',
-    'claude-sonnet-4-0',
-    'claude-sonnet-4-20250514',
     'cohere:c4ai-aya-expanse-32b',
     'cohere:c4ai-aya-expanse-8b',
     'cohere:command',

@@ -163,54 +148,6 @@ KnownModelName = TypeAliasType(
     'google-vertex:gemini-2.5-flash',
     'google-vertex:gemini-2.5-flash-lite',
     'google-vertex:gemini-2.5-pro',
-    'gpt-3.5-turbo',
-    'gpt-3.5-turbo-0125',
-    'gpt-3.5-turbo-0301',
-    'gpt-3.5-turbo-0613',
-    'gpt-3.5-turbo-1106',
-    'gpt-3.5-turbo-16k',
-    'gpt-3.5-turbo-16k-0613',
-    'gpt-4',
-    'gpt-4-0125-preview',
-    'gpt-4-0314',
-    'gpt-4-0613',
-    'gpt-4-1106-preview',
-    'gpt-4-32k',
-    'gpt-4-32k-0314',
-    'gpt-4-32k-0613',
-    'gpt-4-turbo',
-    'gpt-4-turbo-2024-04-09',
-    'gpt-4-turbo-preview',
-    'gpt-4-vision-preview',
-    'gpt-4.1',
-    'gpt-4.1-2025-04-14',
-    'gpt-4.1-mini',
-    'gpt-4.1-mini-2025-04-14',
-    'gpt-4.1-nano',
-    'gpt-4.1-nano-2025-04-14',
-    'gpt-4o',
-    'gpt-4o-2024-05-13',
-    'gpt-4o-2024-08-06',
-    'gpt-4o-2024-11-20',
-    'gpt-4o-audio-preview',
-    'gpt-4o-audio-preview-2024-10-01',
-    'gpt-4o-audio-preview-2024-12-17',
-    'gpt-4o-audio-preview-2025-06-03',
-    'gpt-4o-mini',
-    'gpt-4o-mini-2024-07-18',
-    'gpt-4o-mini-audio-preview',
-    'gpt-4o-mini-audio-preview-2024-12-17',
-    'gpt-4o-mini-search-preview',
-    'gpt-4o-mini-search-preview-2025-03-11',
-    'gpt-4o-search-preview',
-    'gpt-4o-search-preview-2025-03-11',
-    'gpt-5',
-    'gpt-5-2025-08-07',
-    'gpt-5-chat-latest',
-    'gpt-5-mini',
-    'gpt-5-mini-2025-08-07',
-    'gpt-5-nano',
-    'gpt-5-nano-2025-08-07',
     'grok:grok-4',
     'grok:grok-4-0709',
     'grok:grok-3',

@@ -271,22 +208,6 @@ KnownModelName = TypeAliasType(
     'moonshotai:kimi-latest',
     'moonshotai:kimi-thinking-preview',
     'moonshotai:kimi-k2-0711-preview',
-    'o1',
-    'o1-2024-12-17',
-    'o1-mini',
-    'o1-mini-2024-09-12',
-    'o1-preview',
-    'o1-preview-2024-09-12',
-    'o1-pro',
-    'o1-pro-2025-03-19',
-    'o3',
-    'o3-2025-04-16',
-    'o3-deep-research',
-    'o3-deep-research-2025-06-26',
-    'o3-mini',
-    'o3-mini-2025-01-31',
-    'o3-pro',
-    'o3-pro-2025-06-10',
     'openai:chatgpt-4o-latest',
     'openai:codex-mini-latest',
     'openai:gpt-3.5-turbo',
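For context, the `KnownModelName` edits above remove the bare `claude-*`, `gpt-*`, and `o1*`/`o3*` entries in favor of the provider-prefixed forms that remain listed, and add the new `anthropic:claude-sonnet-4-5` names. A minimal sketch (not part of the diff) using one of the added entries; the prompt is illustrative:

```python
from pydantic_ai import Agent

# 'anthropic:claude-sonnet-4-5' is one of the names added in 1.0.12; bare names such as
# 'claude-sonnet-4-20250514' or 'gpt-4o' are no longer part of the KnownModelName literal.
agent = Agent('anthropic:claude-sonnet-4-5')
result = agent.run_sync('Say hello.')  # prompt is illustrative
print(result.output)
```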
pydantic_ai/models/anthropic.py
CHANGED
@@ -10,11 +10,10 @@ from typing import Any, Literal, cast, overload
 from pydantic import TypeAdapter
 from typing_extensions import assert_never

-from pydantic_ai.builtin_tools import CodeExecutionTool, WebSearchTool
-
 from .. import ModelHTTPError, UnexpectedModelBehavior, _utils, usage
 from .._run_context import RunContext
 from .._utils import guard_tool_call_id as _guard_tool_call_id
+from ..builtin_tools import CodeExecutionTool, MemoryTool, WebSearchTool
 from ..exceptions import UserError
 from ..messages import (
     BinaryContent,

@@ -54,7 +53,7 @@ _FINISH_REASON_MAP: dict[BetaStopReason, FinishReason] = {


 try:
-    from anthropic import NOT_GIVEN, APIStatusError, AsyncStream
+    from anthropic import NOT_GIVEN, APIStatusError, AsyncStream, omit as OMIT
     from anthropic.types.beta import (
         BetaBase64PDFBlockParam,
         BetaBase64PDFSourceParam,

@@ -68,6 +67,7 @@ try:
         BetaContentBlockParam,
         BetaImageBlockParam,
         BetaInputJSONDelta,
+        BetaMemoryTool20250818Param,
         BetaMessage,
         BetaMessageParam,
         BetaMetadataParam,

@@ -255,8 +255,7 @@ class AnthropicModel(Model):
     ) -> BetaMessage | AsyncStream[BetaRawMessageStreamEvent]:
         # standalone function to make it easier to override
         tools = self._get_tools(model_request_parameters)
-
-        tools += builtin_tools
+        tools, beta_features = self._add_builtin_tools(tools, model_request_parameters)

         tool_choice: BetaToolChoiceParam | None

@@ -265,6 +264,10 @@ class AnthropicModel(Model):
         else:
             if not model_request_parameters.allow_text_output:
                 tool_choice = {'type': 'any'}
+                if (thinking := model_settings.get('anthropic_thinking')) and thinking.get('type') == 'enabled':
+                    raise UserError(
+                        'Anthropic does not support thinking and output tools at the same time. Use `output_type=PromptedOutput(...)` instead.'
+                    )
             else:
                 tool_choice = {'type': 'auto'}

@@ -275,24 +278,26 @@ class AnthropicModel(Model):

         try:
             extra_headers = model_settings.get('extra_headers', {})
-            for k, v in tool_headers.items():
-                extra_headers.setdefault(k, v)
             extra_headers.setdefault('User-Agent', get_user_agent())
+            if beta_features:
+                if 'anthropic-beta' in extra_headers:
+                    beta_features.insert(0, extra_headers['anthropic-beta'])
+                extra_headers['anthropic-beta'] = ','.join(beta_features)

             return await self.client.beta.messages.create(
                 max_tokens=model_settings.get('max_tokens', 4096),
-                system=system_prompt or NOT_GIVEN,
+                system=system_prompt or OMIT,
                 messages=anthropic_messages,
                 model=self._model_name,
-                tools=tools or NOT_GIVEN,
-                tool_choice=tool_choice or NOT_GIVEN,
+                tools=tools or OMIT,
+                tool_choice=tool_choice or OMIT,
                 stream=stream,
-                thinking=model_settings.get('anthropic_thinking', NOT_GIVEN),
-                stop_sequences=model_settings.get('stop_sequences', NOT_GIVEN),
-                temperature=model_settings.get('temperature', NOT_GIVEN),
-                top_p=model_settings.get('top_p', NOT_GIVEN),
+                thinking=model_settings.get('anthropic_thinking', OMIT),
+                stop_sequences=model_settings.get('stop_sequences', OMIT),
+                temperature=model_settings.get('temperature', OMIT),
+                top_p=model_settings.get('top_p', OMIT),
                 timeout=model_settings.get('timeout', NOT_GIVEN),
-                metadata=model_settings.get('anthropic_metadata', NOT_GIVEN),
+                metadata=model_settings.get('anthropic_metadata', OMIT),
                 extra_headers=extra_headers,
                 extra_body=model_settings.get('extra_body'),
             )

@@ -363,14 +368,13 @@ class AnthropicModel(Model):
             _provider_name=self._provider.name,
         )

-    def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[
+    def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[BetaToolUnionParam]:
         return [self._map_tool_definition(r) for r in model_request_parameters.tool_defs.values()]

-    def
-        self, model_request_parameters: ModelRequestParameters
-    ) -> tuple[list[BetaToolUnionParam],
-
-        extra_headers: dict[str, str] = {}
+    def _add_builtin_tools(
+        self, tools: list[BetaToolUnionParam], model_request_parameters: ModelRequestParameters
+    ) -> tuple[list[BetaToolUnionParam], list[str]]:
+        beta_features: list[str] = []
         for tool in model_request_parameters.builtin_tools:
             if isinstance(tool, WebSearchTool):
                 user_location = UserLocation(type='approximate', **tool.user_location) if tool.user_location else None

@@ -385,13 +389,20 @@ class AnthropicModel(Model):
                     )
                 )
             elif isinstance(tool, CodeExecutionTool):  # pragma: no branch
-                extra_headers['anthropic-beta'] = 'code-execution-2025-05-22'
                 tools.append(BetaCodeExecutionTool20250522Param(name='code_execution', type='code_execution_20250522'))
+                beta_features.append('code-execution-2025-05-22')
+            elif isinstance(tool, MemoryTool):  # pragma: no branch
+                if 'memory' not in model_request_parameters.tool_defs:
+                    raise UserError("Built-in `MemoryTool` requires a 'memory' tool to be defined.")
+                # Replace the memory tool definition with the built-in memory tool
+                tools = [tool for tool in tools if tool['name'] != 'memory']
+                tools.append(BetaMemoryTool20250818Param(name='memory', type='memory_20250818'))
+                beta_features.append('context-management-2025-06-27')
             else:  # pragma: no cover
                 raise UserError(
                     f'`{tool.__class__.__name__}` is not supported by `AnthropicModel`. If it should be, please file an issue.'
                 )
-        return tools,
+        return tools, beta_features

     async def _map_message(self, messages: list[ModelMessage]) -> tuple[str, list[BetaMessageParam]]:  # noqa: C901
         """Just maps a `pydantic_ai.Message` to a `anthropic.types.MessageParam`."""

@@ -759,6 +770,8 @@ def _map_server_tool_use_block(item: BetaServerToolUseBlock, provider_name: str)
             args=cast(dict[str, Any], item.input) or None,
             tool_call_id=item.id,
         )
+    elif item.name in ('web_fetch', 'bash_code_execution', 'text_editor_code_execution'):  # pragma: no cover
+        raise NotImplementedError(f'Anthropic built-in tool {item.name!r} is not currently supported.')
     else:
         assert_never(item.name)
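The `_add_builtin_tools` refactor collects required beta flags in `beta_features` instead of overwriting the `anthropic-beta` header, and `_messages_create` now merges them with any caller-supplied value. A small worked sketch of just that merging step, using a hypothetical user-set flag:

```python
# The caller's existing header value (hypothetical flag) is kept first, then the
# flags required by the built-in tools are appended as a comma-separated list.
extra_headers = {'anthropic-beta': 'some-user-beta-flag'}
beta_features = ['code-execution-2025-05-22', 'context-management-2025-06-27']

if beta_features:
    if 'anthropic-beta' in extra_headers:
        beta_features.insert(0, extra_headers['anthropic-beta'])
    extra_headers['anthropic-beta'] = ','.join(beta_features)

print(extra_headers['anthropic-beta'])
# -> some-user-beta-flag,code-execution-2025-05-22,context-management-2025-06-27
```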
pydantic_ai/models/bedrock.py
CHANGED
@@ -13,10 +13,7 @@ import anyio
 import anyio.to_thread
 from typing_extensions import ParamSpec, assert_never

-from pydantic_ai import _utils, usage
-from pydantic_ai._run_context import RunContext
-from pydantic_ai.exceptions import UserError
-from pydantic_ai.messages import (
+from pydantic_ai import (
     AudioUrl,
     BinaryContent,
     BuiltinToolCallPart,

@@ -25,6 +22,7 @@ from pydantic_ai.messages import (
     FinishReason,
     ImageUrl,
     ModelMessage,
+    ModelProfileSpec,
     ModelRequest,
     ModelResponse,
     ModelResponsePart,

@@ -37,9 +35,12 @@ from pydantic_ai.messages import (
     ToolReturnPart,
     UserPromptPart,
     VideoUrl,
+    _utils,
+    usage,
 )
+from pydantic_ai._run_context import RunContext
+from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import Model, ModelRequestParameters, StreamedResponse, download_item
-from pydantic_ai.profiles import ModelProfileSpec
 from pydantic_ai.providers import Provider, infer_provider
 from pydantic_ai.providers.bedrock import BedrockModelProfile
 from pydantic_ai.settings import ModelSettings
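This import reshuffle relies on `ModelProfileSpec` (along with `_utils` and `usage`) being re-exported from the package root, matching the `pydantic_ai/__init__.py` changes in this release. A minimal sketch, assuming a spec may be either a `ModelProfile` instance or a callable from model name to profile as in current releases; the profile function is illustrative:

```python
from pydantic_ai import ModelProfileSpec
from pydantic_ai.profiles import ModelProfile


def my_profile(model_name: str) -> ModelProfile | None:
    # Hypothetical: return a default profile for one model family, None for the rest.
    if model_name.startswith('anthropic.claude'):
        return ModelProfile()
    return None


spec: ModelProfileSpec = my_profile  # a spec may also be a ModelProfile instance
```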
pydantic_ai/models/google.py
CHANGED
@@ -419,8 +419,8 @@ class GoogleModel(Model):
         return contents, config

     def _process_response(self, response: GenerateContentResponse) -> ModelResponse:
-        if not response.candidates
-            raise UnexpectedModelBehavior('Expected
+        if not response.candidates:
+            raise UnexpectedModelBehavior('Expected at least one candidate in Gemini response')  # pragma: no cover
         candidate = response.candidates[0]
         if candidate.content is None or candidate.content.parts is None:
             if candidate.finish_reason == 'SAFETY':
pydantic_ai/models/instrumented.py
CHANGED

@@ -9,6 +9,7 @@ from dataclasses import dataclass, field
 from typing import Any, Literal, cast
 from urllib.parse import urlparse

+from genai_prices.types import PriceCalculation
 from opentelemetry._events import (
     Event,  # pyright: ignore[reportPrivateImportUsage]
     EventLogger,  # pyright: ignore[reportPrivateImportUsage]

@@ -169,6 +170,11 @@ class InstrumentationSettings:
         self.tokens_histogram = self.meter.create_histogram(
             **tokens_histogram_kwargs,  # pyright: ignore
         )
+        self.cost_histogram = self.meter.create_histogram(
+            'operation.cost',
+            unit='{USD}',
+            description='Monetary cost',
+        )

     def messages_to_otel_events(self, messages: list[ModelMessage]) -> list[Event]:
         """Convert a list of model messages to OpenTelemetry events.

@@ -302,6 +308,21 @@ class InstrumentationSettings:
             }
         )

+    def record_metrics(
+        self,
+        response: ModelResponse,
+        price_calculation: PriceCalculation | None,
+        attributes: dict[str, AttributeValue],
+    ):
+        for typ in ['input', 'output']:
+            if not (tokens := getattr(response.usage, f'{typ}_tokens', 0)):  # pragma: no cover
+                continue
+            token_attributes = {**attributes, 'gen_ai.token.type': typ}
+            self.tokens_histogram.record(tokens, token_attributes)
+            if price_calculation:
+                cost = float(getattr(price_calculation, f'{typ}_price'))
+                self.cost_histogram.record(cost, token_attributes)
+

 GEN_AI_SYSTEM_ATTRIBUTE = 'gen_ai.system'
 GEN_AI_REQUEST_MODEL_ATTRIBUTE = 'gen_ai.request.model'

@@ -395,6 +416,7 @@ class InstrumentedModel(WrapperModel):
         system = cast(str, attributes[GEN_AI_SYSTEM_ATTRIBUTE])

         response_model = response.model_name or request_model
+        price_calculation = None

         def _record_metrics():
             metric_attributes = {

@@ -403,16 +425,7 @@ class InstrumentedModel(WrapperModel):
                 'gen_ai.request.model': request_model,
                 'gen_ai.response.model': response_model,
             }
-
-            self.instrumentation_settings.tokens_histogram.record(
-                response.usage.input_tokens,
-                {**metric_attributes, 'gen_ai.token.type': 'input'},
-            )
-            if response.usage.output_tokens:  # pragma: no branch
-                self.instrumentation_settings.tokens_histogram.record(
-                    response.usage.output_tokens,
-                    {**metric_attributes, 'gen_ai.token.type': 'output'},
-                )
+            self.instrumentation_settings.record_metrics(response, price_calculation, metric_attributes)

         nonlocal record_metrics
         record_metrics = _record_metrics

@@ -427,7 +440,7 @@ class InstrumentedModel(WrapperModel):
             'gen_ai.response.model': response_model,
         }
         try:
-
+            price_calculation = response.cost()
         except LookupError:
             # The cost of this provider/model is unknown, which is common.
             pass

@@ -435,6 +448,9 @@ class InstrumentedModel(WrapperModel):
             warnings.warn(
                 f'Failed to get cost from response: {type(e).__name__}: {e}', CostCalculationFailedWarning
             )
+        else:
+            attributes_to_set['operation.cost'] = float(price_calculation.total_price)
+
         if response.provider_response_id is not None:
             attributes_to_set['gen_ai.response.id'] = response.provider_response_id
         if response.finish_reason is not None: