pydantic-ai-slim 1.0.0b1__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pydantic_ai/_a2a.py +1 -1
- pydantic_ai/_agent_graph.py +16 -19
- pydantic_ai/_parts_manager.py +3 -1
- pydantic_ai/_tool_manager.py +29 -6
- pydantic_ai/ag_ui.py +75 -43
- pydantic_ai/agent/__init__.py +7 -7
- pydantic_ai/durable_exec/temporal/_agent.py +71 -10
- pydantic_ai/exceptions.py +2 -2
- pydantic_ai/mcp.py +13 -25
- pydantic_ai/messages.py +78 -19
- pydantic_ai/models/__init__.py +1 -0
- pydantic_ai/models/anthropic.py +4 -11
- pydantic_ai/models/bedrock.py +6 -14
- pydantic_ai/models/gemini.py +3 -1
- pydantic_ai/models/google.py +15 -1
- pydantic_ai/models/groq.py +122 -34
- pydantic_ai/models/instrumented.py +5 -0
- pydantic_ai/models/openai.py +17 -13
- pydantic_ai/providers/__init__.py +4 -0
- pydantic_ai/providers/google_vertex.py +2 -1
- pydantic_ai/providers/groq.py +21 -2
- pydantic_ai/providers/litellm.py +134 -0
- pydantic_ai/retries.py +42 -2
- pydantic_ai/tools.py +7 -7
- pydantic_ai/toolsets/combined.py +2 -2
- pydantic_ai/toolsets/function.py +47 -19
- pydantic_ai/usage.py +37 -3
- {pydantic_ai_slim-1.0.0b1.dist-info → pydantic_ai_slim-1.0.1.dist-info}/METADATA +6 -7
- {pydantic_ai_slim-1.0.0b1.dist-info → pydantic_ai_slim-1.0.1.dist-info}/RECORD +32 -31
- {pydantic_ai_slim-1.0.0b1.dist-info → pydantic_ai_slim-1.0.1.dist-info}/WHEEL +0 -0
- {pydantic_ai_slim-1.0.0b1.dist-info → pydantic_ai_slim-1.0.1.dist-info}/entry_points.txt +0 -0
- {pydantic_ai_slim-1.0.0b1.dist-info → pydantic_ai_slim-1.0.1.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/mcp.py
CHANGED
@@ -20,6 +20,7 @@ from typing_extensions import Self, assert_never, deprecated
 
 from pydantic_ai.tools import RunContext, ToolDefinition
 
+from .direct import model_request
 from .toolsets.abstract import AbstractToolset, ToolsetTool
 
 try:
@@ -300,6 +301,8 @@ class MCPServer(AbstractToolset[Any], ABC):
         return self
 
     async def __aexit__(self, *args: Any) -> bool | None:
+        if self._running_count == 0:
+            raise ValueError('MCPServer.__aexit__ called more times than __aenter__')
         async with self._enter_lock:
             self._running_count -= 1
             if self._running_count == 0 and self._exit_stack is not None:
@@ -327,11 +330,7 @@ class MCPServer(AbstractToolset[Any], ABC):
         if stop_sequences := params.stopSequences:  # pragma: no branch
             model_settings['stop_sequences'] = stop_sequences
 
-        model_response = await self.sampling_model.request(
-            pai_messages,
-            model_settings,
-            models.ModelRequestParameters(),
-        )
+        model_response = await model_request(self.sampling_model, pai_messages, model_settings=model_settings)
         return mcp_types.CreateMessageResult(
             role='assistant',
             content=_mcp.map_from_model_response(model_response),
@@ -401,16 +400,7 @@ class MCPServerStdio(MCPServer):
     from pydantic_ai.mcp import MCPServerStdio
 
     server = MCPServerStdio(  # (1)!
-        'deno',
-        args=[
-            'run',
-            '-N',
-            '-R=node_modules',
-            '-W=node_modules',
-            '--node-modules-dir=auto',
-            'jsr:@pydantic/mcp-run-python',
-            'stdio',
-        ]
+        'uv', args=['run', 'mcp-run-python', 'stdio'], timeout=10
     )
     agent = Agent('openai:gpt-4o', toolsets=[server])
 
@@ -419,7 +409,7 @@ class MCPServerStdio(MCPServer):
         ...
     ```
 
-    1. See [MCP Run Python](
+    1. See [MCP Run Python](https://github.com/pydantic/mcp-run-python) for more information.
     2. This will start the server as a subprocess and connect to it.
     """
 
@@ -455,6 +445,7 @@ class MCPServerStdio(MCPServer):
         self,
         command: str,
         args: Sequence[str],
+        *,
         env: dict[str, str] | None = None,
         cwd: str | Path | None = None,
         tool_prefix: str | None = None,
@@ -467,7 +458,6 @@ class MCPServerStdio(MCPServer):
         sampling_model: models.Model | None = None,
         max_retries: int = 1,
         elicitation_callback: ElicitationFnT | None = None,
-        *,
         id: str | None = None,
     ):
         """Build a new MCP server.
@@ -581,8 +571,8 @@ class _MCPServerHTTP(MCPServer):
 
     def __init__(
         self,
-        *,
         url: str,
+        *,
         headers: dict[str, str] | None = None,
         http_client: httpx.AsyncClient | None = None,
         id: str | None = None,
@@ -732,16 +722,15 @@ class MCPServerSSE(_MCPServerHTTP):
     from pydantic_ai import Agent
     from pydantic_ai.mcp import MCPServerSSE
 
-    server = MCPServerSSE('http://localhost:3001/sse')
+    server = MCPServerSSE('http://localhost:3001/sse')
     agent = Agent('openai:gpt-4o', toolsets=[server])
 
     async def main():
-        async with agent:  # (
+        async with agent:  # (1)!
             ...
     ```
 
-    1.
-    2. This will connect to a server running on `localhost:3001`.
+    1. This will connect to a server running on `localhost:3001`.
     """
 
     @property
@@ -765,7 +754,7 @@ class MCPServerHTTP(MCPServerSSE):
     from pydantic_ai import Agent
     from pydantic_ai.mcp import MCPServerHTTP
 
-    server = MCPServerHTTP('http://localhost:3001/sse')
+    server = MCPServerHTTP('http://localhost:3001/sse')
     agent = Agent('openai:gpt-4o', toolsets=[server])
 
     async def main():
@@ -773,8 +762,7 @@ class MCPServerHTTP(MCPServerSSE):
             ...
     ```
 
-    1.
-    2. This will connect to a server running on `localhost:3001`.
+    1. This will connect to a server running on `localhost:3001`.
     """
 
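Taken together, the constructor changes above make everything after `args` keyword-only on `MCPServerStdio` and move `url` to the first positional parameter of the HTTP-based servers. A minimal sketch of calling code against the 1.0.1 signatures, reusing the example servers from the docstrings above (the `mcp-run-python` invocation assumes that package is available to `uv run`):

from pydantic_ai import Agent
from pydantic_ai.mcp import MCPServerSSE, MCPServerStdio

# `command` and `args` stay positional; env, cwd, timeout, etc. must now be passed as keywords.
stdio_server = MCPServerStdio('uv', args=['run', 'mcp-run-python', 'stdio'], timeout=10)

# `url` is now positional; headers, http_client, id, etc. remain keyword-only.
sse_server = MCPServerSSE('http://localhost:3001/sse')

agent = Agent('openai:gpt-4o', toolsets=[stdio_server, sse_server])


async def main():
    # Entering the agent starts both servers; exiting more times than entering now raises ValueError.
    async with agent:
        ...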
pydantic_ai/messages.py
CHANGED
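The messages.py diff below adds an `identifier` to `BinaryContent` and every `FileUrl` subclass: when none is passed, `_multi_modal_content_identifier` derives one from the first six hex characters of the SHA-1 of the data (or URL). A small sketch of the resulting behavior, with made-up bytes and URL for illustration:

import hashlib

from pydantic_ai.messages import BinaryContent, ImageUrl

png_bytes = b'\x89PNG fake image bytes'

# With no explicit identifier, 1.0.1 derives one from the content itself.
content = BinaryContent(png_bytes, media_type='image/png')
assert content.identifier == hashlib.sha1(png_bytes).hexdigest()[:6]

# An explicit identifier wins; note that everything after the URL is now keyword-only.
image = ImageUrl('https://example.com/logo.png', identifier='logo')
print(image.identifier)  # 'logo'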
@@ -1,6 +1,7 @@
 from __future__ import annotations as _annotations
 
 import base64
+import hashlib
 from abc import ABC, abstractmethod
 from collections.abc import Sequence
 from dataclasses import KW_ONLY, dataclass, field, replace
@@ -88,6 +89,13 @@ class SystemPromptPart:
     __repr__ = _utils.dataclasses_no_defaults_repr
 
 
+def _multi_modal_content_identifier(identifier: str | bytes) -> str:
+    """Generate stable identifier for multi-modal content to help LLM in finding a specific file in tool call responses."""
+    if isinstance(identifier, str):
+        identifier = identifier.encode('utf-8')
+    return hashlib.sha1(identifier).hexdigest()[:6]
+
+
 @dataclass(init=False, repr=False)
 class FileUrl(ABC):
     """Abstract base class for any URL-based file."""
@@ -115,17 +123,31 @@ class FileUrl(ABC):
         compare=False, default=None
     )
 
+    identifier: str | None = None
+    """The identifier of the file, such as a unique ID. generating one from the url if not explicitly set
+
+    This identifier can be provided to the model in a message to allow it to refer to this file in a tool call argument,
+    and the tool can look up the file in question by iterating over the message history and finding the matching `FileUrl`.
+
+    This identifier is only automatically passed to the model when the `FileUrl` is returned by a tool.
+    If you're passing the `FileUrl` as a user message, it's up to you to include a separate text part with the identifier,
+    e.g. "This is file <identifier>:" preceding the `FileUrl`.
+    """
+
     def __init__(
         self,
         url: str,
+        *,
         force_download: bool = False,
         vendor_metadata: dict[str, Any] | None = None,
         media_type: str | None = None,
+        identifier: str | None = None,
     ) -> None:
         self.url = url
-        self.vendor_metadata = vendor_metadata
         self.force_download = force_download
+        self.vendor_metadata = vendor_metadata
         self._media_type = media_type
+        self.identifier = identifier or _multi_modal_content_identifier(url)
 
     @pydantic.computed_field
     @property
@@ -162,11 +184,12 @@ class VideoUrl(FileUrl):
     def __init__(
         self,
         url: str,
+        *,
         force_download: bool = False,
         vendor_metadata: dict[str, Any] | None = None,
         media_type: str | None = None,
         kind: Literal['video-url'] = 'video-url',
-
+        identifier: str | None = None,
         # Required for inline-snapshot which expects all dataclass `__init__` methods to take all field names as kwargs.
         _media_type: str | None = None,
     ) -> None:
@@ -175,6 +198,7 @@ class VideoUrl(FileUrl):
             force_download=force_download,
             vendor_metadata=vendor_metadata,
             media_type=media_type or _media_type,
+            identifier=identifier,
         )
         self.kind = kind
 
@@ -235,11 +259,12 @@ class AudioUrl(FileUrl):
     def __init__(
         self,
         url: str,
+        *,
         force_download: bool = False,
         vendor_metadata: dict[str, Any] | None = None,
         media_type: str | None = None,
         kind: Literal['audio-url'] = 'audio-url',
-
+        identifier: str | None = None,
         # Required for inline-snapshot which expects all dataclass `__init__` methods to take all field names as kwargs.
         _media_type: str | None = None,
     ) -> None:
@@ -248,6 +273,7 @@ class AudioUrl(FileUrl):
             force_download=force_download,
             vendor_metadata=vendor_metadata,
             media_type=media_type or _media_type,
+            identifier=identifier,
         )
         self.kind = kind
 
@@ -295,11 +321,12 @@ class ImageUrl(FileUrl):
     def __init__(
         self,
         url: str,
+        *,
         force_download: bool = False,
         vendor_metadata: dict[str, Any] | None = None,
         media_type: str | None = None,
         kind: Literal['image-url'] = 'image-url',
-
+        identifier: str | None = None,
         # Required for inline-snapshot which expects all dataclass `__init__` methods to take all field names as kwargs.
         _media_type: str | None = None,
     ) -> None:
@@ -308,6 +335,7 @@ class ImageUrl(FileUrl):
             force_download=force_download,
             vendor_metadata=vendor_metadata,
             media_type=media_type or _media_type,
+            identifier=identifier,
         )
         self.kind = kind
 
@@ -350,11 +378,12 @@ class DocumentUrl(FileUrl):
     def __init__(
         self,
         url: str,
+        *,
         force_download: bool = False,
         vendor_metadata: dict[str, Any] | None = None,
         media_type: str | None = None,
         kind: Literal['document-url'] = 'document-url',
-
+        identifier: str | None = None,
         # Required for inline-snapshot which expects all dataclass `__init__` methods to take all field names as kwargs.
         _media_type: str | None = None,
     ) -> None:
@@ -363,6 +392,7 @@ class DocumentUrl(FileUrl):
             force_download=force_download,
             vendor_metadata=vendor_metadata,
             media_type=media_type or _media_type,
+            identifier=identifier,
         )
         self.kind = kind
 
@@ -405,24 +435,26 @@ class DocumentUrl(FileUrl):
             raise ValueError(f'Unknown document media type: {media_type}') from e
 
 
-@dataclass(repr=False)
+@dataclass(init=False, repr=False)
 class BinaryContent:
     """Binary content, e.g. an audio or image file."""
 
     data: bytes
     """The binary data."""
 
-    media_type: AudioMediaType | ImageMediaType | DocumentMediaType | str
-    """The media type of the binary data."""
-
     _: KW_ONLY
 
-
-    """
+    media_type: AudioMediaType | ImageMediaType | DocumentMediaType | str
+    """The media type of the binary data."""
 
-
+    identifier: str
+    """Identifier for the binary content, such as a unique ID. generating one from the data if not explicitly set
+    This identifier can be provided to the model in a message to allow it to refer to this file in a tool call argument,
+    and the tool can look up the file in question by iterating over the message history and finding the matching `BinaryContent`.
 
-    This identifier is only automatically passed to the model when the `BinaryContent` is returned by a tool.
+    This identifier is only automatically passed to the model when the `BinaryContent` is returned by a tool.
+    If you're passing the `BinaryContent` as a user message, it's up to you to include a separate text part with the identifier,
+    e.g. "This is file <identifier>:" preceding the `BinaryContent`.
     """
 
     vendor_metadata: dict[str, Any] | None = None
@@ -435,6 +467,21 @@ class BinaryContent:
     kind: Literal['binary'] = 'binary'
     """Type identifier, this is available on all parts as a discriminator."""
 
+    def __init__(
+        self,
+        data: bytes,
+        *,
+        media_type: AudioMediaType | ImageMediaType | DocumentMediaType | str,
+        identifier: str | None = None,
+        vendor_metadata: dict[str, Any] | None = None,
+        kind: Literal['binary'] = 'binary',
+    ) -> None:
+        self.data = data
+        self.media_type = media_type
+        self.identifier = identifier or _multi_modal_content_identifier(data)
+        self.vendor_metadata = vendor_metadata
+        self.kind = kind
+
     @property
     def is_audio(self) -> bool:
         """Return `True` if the media type is an audio type."""
@@ -786,7 +833,7 @@ ModelRequestPart = Annotated[
 class ModelRequest:
     """A request generated by Pydantic AI and sent to a model, e.g. a message from the Pydantic AI app to the model."""
 
-    parts:
+    parts: Sequence[ModelRequestPart]
     """The parts of the user message."""
 
     _: KW_ONLY
@@ -941,7 +988,7 @@ ModelResponsePart = Annotated[
 class ModelResponse:
     """A response from a model, e.g. a message from the model to the Pydantic AI app."""
 
-    parts:
+    parts: Sequence[ModelResponsePart]
     """The parts of the model message."""
 
    _: KW_ONLY
@@ -967,18 +1014,30 @@ class ModelResponse:
     provider_name: str | None = None
     """The name of the LLM provider that generated the response."""
 
-    provider_details:
+    provider_details: Annotated[
+        dict[str, Any] | None,
+        # `vendor_details` is deprecated, but we still want to support deserializing model responses stored in a DB before the name was changed
+        pydantic.Field(validation_alias=pydantic.AliasChoices('provider_details', 'vendor_details')),
+    ] = None
     """Additional provider-specific details in a serializable format.
 
     This allows storing selected vendor-specific data that isn't mapped to standard ModelResponse fields.
     For OpenAI models, this may include 'logprobs', 'finish_reason', etc.
     """
 
-    provider_response_id:
+    provider_response_id: Annotated[
+        str | None,
+        # `vendor_id` is deprecated, but we still want to support deserializing model responses stored in a DB before the name was changed
+        pydantic.Field(validation_alias=pydantic.AliasChoices('provider_response_id', 'vendor_id')),
+    ] = None
     """request ID as specified by the model provider. This can be used to track the specific request to the model."""
 
-
-
+    @deprecated('`price` is deprecated, use `cost` instead')
+    def price(self) -> genai_types.PriceCalculation:  # pragma: no cover
+        return self.cost()
+
+    def cost(self) -> genai_types.PriceCalculation:
+        """Calculate the cost of the usage.
 
         Uses [`genai-prices`](https://github.com/pydantic/genai-prices).
         """
pydantic_ai/models/__init__.py
CHANGED
pydantic_ai/models/anthropic.py
CHANGED
@@ -536,7 +536,7 @@ class AnthropicModel(Model):
     }
 
 
-def _map_usage(message: BetaMessage | BetaRawMessageStreamEvent) -> usage.RequestUsage:
+def _map_usage(message: BetaMessage | BetaRawMessageStartEvent | BetaRawMessageDeltaEvent) -> usage.RequestUsage:
     if isinstance(message, BetaMessage):
         response_usage = message.usage
     elif isinstance(message, BetaRawMessageStartEvent):
@@ -544,12 +544,7 @@ def _map_usage(message: BetaMessage | BetaRawMessageStreamEvent) -> usage.RequestUsage:
     elif isinstance(message, BetaRawMessageDeltaEvent):
         response_usage = message.usage
     else:
-
-        # - RawMessageStopEvent
-        # - RawContentBlockStartEvent
-        # - RawContentBlockDeltaEvent
-        # - RawContentBlockStopEvent
-        return usage.RequestUsage()
+        assert_never(message)
 
     # Store all integer-typed usage values in the details, except 'output_tokens' which is represented exactly by
     # `response_tokens`
@@ -586,10 +581,8 @@ class AnthropicStreamedResponse(StreamedResponse):
         current_block: BetaContentBlock | None = None
 
         async for event in self._response:
-            self._usage += _map_usage(event)
-
             if isinstance(event, BetaRawMessageStartEvent):
-
+                self._usage = _map_usage(event)
 
             elif isinstance(event, BetaRawContentBlockStartEvent):
                 current_block = event.content_block
@@ -652,7 +645,7 @@ class AnthropicStreamedResponse(StreamedResponse):
                 pass
 
             elif isinstance(event, BetaRawMessageDeltaEvent):
-
+                self._usage = _map_usage(event)
 
             elif isinstance(event, BetaRawContentBlockStopEvent | BetaRawMessageStopEvent):  # pragma: no branch
                 current_block = None
pydantic_ai/models/bedrock.py
CHANGED
@@ -2,7 +2,6 @@ from __future__ import annotations
 
 import functools
 import typing
-import warnings
 from collections.abc import AsyncIterator, Iterable, Iterator, Mapping
 from contextlib import asynccontextmanager
 from dataclasses import dataclass, field
@@ -601,7 +600,7 @@ class BedrockStreamedResponse(StreamedResponse):
     _provider_name: str
     _timestamp: datetime = field(default_factory=_utils.now_utc)
 
-    async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
+    async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
         """Return an async iterator of [`ModelResponseStreamEvent`][pydantic_ai.messages.ModelResponseStreamEvent]s.
 
         This method should be implemented by subclasses to translate the vendor-specific stream of events into
@@ -638,18 +637,11 @@ class BedrockStreamedResponse(StreamedResponse):
             index = content_block_delta['contentBlockIndex']
             delta = content_block_delta['delta']
             if 'reasoningContent' in delta:
-
-
-
-
-
-                )
-            else:  # pragma: no cover
-                warnings.warn(
-                    f'Only text reasoning content is supported yet, but you got {delta["reasoningContent"]}. '
-                    'Please report this to the maintainers.',
-                    UserWarning,
-                )
+                yield self._parts_manager.handle_thinking_delta(
+                    vendor_part_id=index,
+                    content=delta['reasoningContent'].get('text'),
+                    signature=delta['reasoningContent'].get('signature'),
+                )
             if 'text' in delta:
                 maybe_event = self._parts_manager.handle_text_delta(vendor_part_id=index, content=delta['text'])
                 if maybe_event is not None:  # pragma: no branch
pydantic_ai/models/gemini.py
CHANGED
@@ -211,7 +211,9 @@ class GeminiModel(Model):
         generation_config = _settings_to_generation_config(model_settings)
         if model_request_parameters.output_mode == 'native':
             if tools:
-                raise UserError(
+                raise UserError(
+                    'Gemini does not support `NativeOutput` and tools at the same time. Use `output_type=ToolOutput(...)` instead.'
+                )
 
             generation_config['response_mime_type'] = 'application/json'
 
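The expanded error message spells out the workaround: when an agent needs both tools and structured output on Gemini, request the output through a tool rather than `NativeOutput`. A hedged sketch of what that looks like from the caller's side (the model name, output model, and top-level `ToolOutput` import are assumptions based on the message, not on this diff):

from pydantic import BaseModel

from pydantic_ai import Agent, ToolOutput


class CityInfo(BaseModel):
    city: str
    country: str


# NativeOutput plus tools now raises the UserError above, so ask for tool-based output instead.
agent = Agent('google-gla:gemini-1.5-flash', output_type=ToolOutput(CityInfo))


@agent.tool_plain
def lookup_population(city: str) -> int:
    """Toy function tool registered alongside the output tool."""
    return 8_900_000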
pydantic_ai/models/google.py
CHANGED
@@ -264,6 +264,14 @@ class GoogleModel(Model):
             yield await self._process_streamed_response(response, model_request_parameters)  # type: ignore
 
     def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[ToolDict] | None:
+        if model_request_parameters.builtin_tools:
+            if model_request_parameters.output_tools:
+                raise UserError(
+                    'Gemini does not support output tools and built-in tools at the same time. Use `output_type=PromptedOutput(...)` instead.'
+                )
+            if model_request_parameters.function_tools:
+                raise UserError('Gemini does not support user tools and built-in tools at the same time.')
+
         tools: list[ToolDict] = [
             ToolDict(function_declarations=[_function_declaration_from_tool(t)])
             for t in model_request_parameters.tool_defs.values()
@@ -334,7 +342,9 @@ class GoogleModel(Model):
         response_schema = None
         if model_request_parameters.output_mode == 'native':
             if tools:
-                raise UserError(
+                raise UserError(
+                    'Gemini does not support `NativeOutput` and tools at the same time. Use `output_type=ToolOutput(...)` instead.'
+                )
             response_mime_type = 'application/json'
             output_object = model_request_parameters.output_object
             assert output_object is not None
@@ -559,6 +569,10 @@ class GeminiStreamedResponse(StreamedResponse):
                 )
                 if maybe_event is not None:  # pragma: no branch
                     yield maybe_event
+            elif part.executable_code is not None:
+                pass
+            elif part.code_execution_result is not None:
+                pass
             else:
                 assert part.function_response is not None, f'Unexpected part: {part}'  # pragma: no cover
 
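The new `_get_tools` guard rejects built-in tools combined with output tools or user function tools before the request is built. A simplified restatement of that validation as a standalone check, using a stand-in parameters class rather than the real `ModelRequestParameters`:

from dataclasses import dataclass, field


class UserError(Exception):
    """Stand-in for pydantic_ai.exceptions.UserError."""


@dataclass
class RequestParams:
    builtin_tools: list[str] = field(default_factory=list)
    output_tools: list[str] = field(default_factory=list)
    function_tools: list[str] = field(default_factory=list)


def check_tool_compatibility(params: RequestParams) -> None:
    # Mirrors the order of the checks added above: output tools first, then user function tools.
    if params.builtin_tools:
        if params.output_tools:
            raise UserError(
                'Gemini does not support output tools and built-in tools at the same time. '
                'Use `output_type=PromptedOutput(...)` instead.'
            )
        if params.function_tools:
            raise UserError('Gemini does not support user tools and built-in tools at the same time.')


check_tool_compatibility(RequestParams(builtin_tools=['code_execution']))  # built-in tools alone are fine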