pydantic-ai-slim 0.4.10__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to their public registry, and is provided for informational purposes only.
- pydantic_ai/_function_schema.py +7 -4
- pydantic_ai/_parts_manager.py +8 -9
- pydantic_ai/_thinking_part.py +7 -12
- pydantic_ai/ag_ui.py +346 -316
- pydantic_ai/agent.py +7 -5
- pydantic_ai/messages.py +37 -10
- pydantic_ai/models/__init__.py +2 -2
- pydantic_ai/models/cohere.py +1 -1
- pydantic_ai/models/gemini.py +1 -1
- pydantic_ai/models/google.py +1 -1
- pydantic_ai/models/groq.py +7 -3
- pydantic_ai/models/huggingface.py +7 -2
- pydantic_ai/models/mistral.py +1 -1
- pydantic_ai/models/openai.py +7 -3
- pydantic_ai/models/test.py +3 -1
- pydantic_ai/profiles/__init__.py +3 -0
- pydantic_ai/profiles/anthropic.py +1 -1
- pydantic_ai/profiles/openai.py +22 -12
- pydantic_ai/tools.py +13 -5
- {pydantic_ai_slim-0.4.10.dist-info → pydantic_ai_slim-0.5.0.dist-info}/METADATA +3 -3
- {pydantic_ai_slim-0.4.10.dist-info → pydantic_ai_slim-0.5.0.dist-info}/RECORD +24 -24
- {pydantic_ai_slim-0.4.10.dist-info → pydantic_ai_slim-0.5.0.dist-info}/WHEEL +0 -0
- {pydantic_ai_slim-0.4.10.dist-info → pydantic_ai_slim-0.5.0.dist-info}/entry_points.txt +0 -0
- {pydantic_ai_slim-0.4.10.dist-info → pydantic_ai_slim-0.5.0.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/agent.py
CHANGED
@@ -1870,9 +1870,13 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         on_shutdown: Sequence[Callable[[], Any]] | None = None,
         lifespan: Lifespan[AGUIApp[AgentDepsT, OutputDataT]] | None = None,
     ) -> AGUIApp[AgentDepsT, OutputDataT]:
-        """
+        """Returns an ASGI application that handles every AG-UI request by running the agent.

-
+        Note that the `deps` will be the same for each request, with the exception of the AG-UI state that's
+        injected into the `state` field of a `deps` object that implements the [`StateHandler`][pydantic_ai.ag_ui.StateHandler] protocol.
+        To provide different `deps` for each request (e.g. based on the authenticated user),
+        use [`pydantic_ai.ag_ui.run_ag_ui`][pydantic_ai.ag_ui.run_ag_ui] or
+        [`pydantic_ai.ag_ui.handle_ag_ui_request`][pydantic_ai.ag_ui.handle_ag_ui_request] instead.

         Example:
         ```python
@@ -1882,8 +1886,6 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         app = agent.to_ag_ui()
         ```

-        The `app` is an ASGI application that can be used with any ASGI server.
-
         To run the application, you can use the following command:

         ```bash
@@ -1902,7 +1904,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             usage_limits: Optional limits on model request count or token usage.
             usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
             infer_name: Whether to try to infer the agent name from the call frame if it's not set.
-            toolsets: Optional
+            toolsets: Optional additional toolsets for this run.

             debug: Boolean indicating if debug tracebacks should be returned on errors.
             routes: A list of routes to serve incoming HTTP and WebSocket requests.
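A hedged sketch of the shared-`deps` / per-request-state behaviour described in the new docstring (a minimal sketch only: the `deps` keyword of `to_ag_ui` and the exact attribute the `StateHandler` protocol expects are assumptions inferred from the docstring, not shown in this diff):

```python
from dataclasses import dataclass, field

from pydantic import BaseModel

from pydantic_ai import Agent


class DocState(BaseModel):
    """State shared with the AG-UI frontend (illustrative)."""

    document: str = ''


@dataclass
class AppDeps:
    # A deps object exposing a `state` field is the shape the StateHandler
    # protocol describes: the AG-UI adapter replaces `state` per request,
    # while everything else on `deps` stays shared across requests.
    state: DocState = field(default_factory=DocState)
    api_key: str = 'shared-for-every-request'


agent = Agent('openai:gpt-4o', deps_type=AppDeps)
# One ASGI app; the `deps=` keyword is assumed here for illustration.
app = agent.to_ag_ui(deps=AppDeps())
```

For genuinely per-request `deps` (e.g. derived from the authenticated user), the docstring points at `run_ag_ui` / `handle_ag_ui_request` instead.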
pydantic_ai/messages.py
CHANGED
@@ -106,7 +106,7 @@ class FileUrl(ABC):
         - `GoogleModel`: `VideoUrl.vendor_metadata` is used as `video_metadata`: https://ai.google.dev/gemini-api/docs/video-understanding#customize-video-processing
     """

-    _media_type: str | None = field(init=False, repr=False)
+    _media_type: str | None = field(init=False, repr=False, compare=False)

     def __init__(
         self,
@@ -120,19 +120,21 @@ class FileUrl(ABC):
         self.force_download = force_download
         self._media_type = media_type

-    @abstractmethod
-    def _infer_media_type(self) -> str:
-        """Return the media type of the file, based on the url."""
-
     @property
     def media_type(self) -> str:
-        """Return the media type of the file, based on the
+        """Return the media type of the file, based on the URL or the provided `media_type`."""
         return self._media_type or self._infer_media_type()

+    @abstractmethod
+    def _infer_media_type(self) -> str:
+        """Infer the media type of the file based on the URL."""
+        raise NotImplementedError
+
     @property
     @abstractmethod
     def format(self) -> str:
         """The file format."""
+        raise NotImplementedError

     __repr__ = _utils.dataclasses_no_defaults_repr

@@ -182,7 +184,9 @@ class VideoUrl(FileUrl):
         elif self.is_youtube:
             return 'video/mp4'
         else:
-            raise ValueError(
+            raise ValueError(
+                f'Could not infer media type from video URL: {self.url}. Explicitly provide a `media_type` instead.'
+            )

     @property
     def is_youtube(self) -> bool:
@@ -238,7 +242,9 @@ class AudioUrl(FileUrl):
         if self.url.endswith('.aac'):
             return 'audio/aac'

-        raise ValueError(
+        raise ValueError(
+            f'Could not infer media type from audio URL: {self.url}. Explicitly provide a `media_type` instead.'
+        )

     @property
     def format(self) -> AudioFormat:
@@ -278,7 +284,9 @@ class ImageUrl(FileUrl):
         elif self.url.endswith('.webp'):
             return 'image/webp'
         else:
-            raise ValueError(
+            raise ValueError(
+                f'Could not infer media type from image URL: {self.url}. Explicitly provide a `media_type` instead.'
+            )

     @property
     def format(self) -> ImageFormat:
@@ -312,9 +320,28 @@ class DocumentUrl(FileUrl):

     def _infer_media_type(self) -> str:
         """Return the media type of the document, based on the url."""
+        # Common document types are hardcoded here as mime-type support for these
+        # extensions varies across operating systems.
+        if self.url.endswith(('.md', '.mdx', '.markdown')):
+            return 'text/markdown'
+        elif self.url.endswith('.asciidoc'):
+            return 'text/x-asciidoc'
+        elif self.url.endswith('.txt'):
+            return 'text/plain'
+        elif self.url.endswith('.pdf'):
+            return 'application/pdf'
+        elif self.url.endswith('.rtf'):
+            return 'application/rtf'
+        elif self.url.endswith('.docx'):
+            return 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
+        elif self.url.endswith('.xlsx'):
+            return 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
+
         type_, _ = guess_type(self.url)
         if type_ is None:
-            raise ValueError(
+            raise ValueError(
+                f'Could not infer media type from document URL: {self.url}. Explicitly provide a `media_type` instead.'
+            )
         return type_

     @property
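The practical effect of the `DocumentUrl` change above: common document extensions now map to hardcoded media types instead of relying on the operating system's mime database, and an uninferrable URL raises the new `ValueError` pointing at `media_type`. A small sketch (keyword names follow the constructor shown in this diff):

```python
from pydantic_ai.messages import DocumentUrl

# '.md' is one of the extensions hardcoded in the new _infer_media_type
readme = DocumentUrl(url='https://example.com/README.md')
print(readme.media_type)  # text/markdown

# For an extension that can't be inferred, pass `media_type` explicitly;
# otherwise the ValueError added in this diff is raised.
blob = DocumentUrl(url='https://example.com/report.unknown', media_type='application/octet-stream')
print(blob.media_type)  # application/octet-stream
```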
pydantic_ai/models/__init__.py
CHANGED
@@ -137,12 +137,12 @@ KnownModelName = TypeAliasType(
         'google-gla:gemini-2.0-flash',
         'google-gla:gemini-2.0-flash-lite',
         'google-gla:gemini-2.5-flash',
-        'google-gla:gemini-2.5-flash-lite
+        'google-gla:gemini-2.5-flash-lite',
         'google-gla:gemini-2.5-pro',
         'google-vertex:gemini-2.0-flash',
         'google-vertex:gemini-2.0-flash-lite',
         'google-vertex:gemini-2.5-flash',
-        'google-vertex:gemini-2.5-flash-lite
+        'google-vertex:gemini-2.5-flash-lite',
         'google-vertex:gemini-2.5-pro',
         'gpt-3.5-turbo',
         'gpt-3.5-turbo-0125',
pydantic_ai/models/cohere.py
CHANGED
@@ -192,7 +192,7 @@ class CohereModel(Model):
             # While Cohere's API returns a list, it only does that for future proofing
             # and currently only one item is being returned.
             choice = response.message.content[0]
-            parts.extend(split_content_into_text_and_thinking(choice.text))
+            parts.extend(split_content_into_text_and_thinking(choice.text, self.profile.thinking_tags))
         for c in response.message.tool_calls or []:
             if c.function and c.function.name and c.function.arguments:  # pragma: no branch
                 parts.append(
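The change just above, repeated in the Groq, Hugging Face, Mistral and OpenAI adapters below, is that `split_content_into_text_and_thinking` now receives the profile's `thinking_tags` rather than assuming `<think>`/`</think>`. As a rough, hedged illustration of what such splitting amounts to (a standalone sketch, not the private helper's actual implementation):

```python
from __future__ import annotations

from pydantic_ai.messages import TextPart, ThinkingPart


def split(content: str, tags: tuple[str, str]) -> list[TextPart | ThinkingPart]:
    """Split raw model text into text and thinking parts using the given tags."""
    start, end = tags
    parts: list[TextPart | ThinkingPart] = []
    while (i := content.find(start)) != -1 and (j := content.find(end, i)) != -1:
        if before := content[:i].strip():
            parts.append(TextPart(content=before))
        parts.append(ThinkingPart(content=content[i + len(start) : j]))
        content = content[j + len(end) :]
    if rest := content.strip():
        parts.append(TextPart(content=rest))
    return parts


# [ThinkingPart(content='plan the answer'), TextPart(content='The answer is 4.')]
print(split('<think>plan the answer</think>The answer is 4.', ('<think>', '</think>')))
```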
pydantic_ai/models/gemini.py
CHANGED
pydantic_ai/models/google.py
CHANGED
pydantic_ai/models/groq.py
CHANGED
@@ -30,7 +30,7 @@ from ..messages import (
     ToolReturnPart,
     UserPromptPart,
 )
-from ..profiles import ModelProfileSpec
+from ..profiles import ModelProfile, ModelProfileSpec
 from ..providers import Provider, infer_provider
 from ..settings import ModelSettings
 from ..tools import ToolDefinition
@@ -261,7 +261,7 @@ class GroqModel(Model):
             items.append(ThinkingPart(content=choice.message.reasoning))
         if choice.message.content is not None:
             # NOTE: The `<think>` tag is only present if `groq_reasoning_format` is set to `raw`.
-            items.extend(split_content_into_text_and_thinking(choice.message.content))
+            items.extend(split_content_into_text_and_thinking(choice.message.content, self.profile.thinking_tags))
         if choice.message.tool_calls is not None:
             for c in choice.message.tool_calls:
                 items.append(ToolCallPart(tool_name=c.function.name, args=c.function.arguments, tool_call_id=c.id))
@@ -281,6 +281,7 @@
         return GroqStreamedResponse(
             _response=peekable_response,
             _model_name=self._model_name,
+            _model_profile=self.profile,
             _timestamp=number_to_datetime(first_chunk.created),
         )

@@ -400,6 +401,7 @@ class GroqStreamedResponse(StreamedResponse):
     """Implementation of `StreamedResponse` for Groq models."""

     _model_name: GroqModelName
+    _model_profile: ModelProfile
     _response: AsyncIterable[chat.ChatCompletionChunk]
     _timestamp: datetime

@@ -416,7 +418,9 @@
             content = choice.delta.content
             if content is not None:
                 maybe_event = self._parts_manager.handle_text_delta(
-                    vendor_part_id='content',
+                    vendor_part_id='content',
+                    content=content,
+                    thinking_tags=self._model_profile.thinking_tags,
                 )
                 if maybe_event is not None:  # pragma: no branch
                     yield maybe_event
pydantic_ai/models/huggingface.py
CHANGED
@@ -33,6 +33,7 @@ from ..messages import (
     UserPromptPart,
     VideoUrl,
 )
+from ..profiles import ModelProfile
 from ..settings import ModelSettings
 from ..tools import ToolDefinition
 from . import Model, ModelRequestParameters, StreamedResponse, check_allow_model_requests
@@ -244,7 +245,7 @@ class HuggingFaceModel(Model):
         items: list[ModelResponsePart] = []

         if content is not None:
-            items.extend(split_content_into_text_and_thinking(content))
+            items.extend(split_content_into_text_and_thinking(content, self.profile.thinking_tags))
         if tool_calls is not None:
             for c in tool_calls:
                 items.append(ToolCallPart(c.function.name, c.function.arguments, tool_call_id=c.id))
@@ -267,6 +268,7 @@

         return HuggingFaceStreamedResponse(
             _model_name=self._model_name,
+            _model_profile=self.profile,
             _response=peekable_response,
             _timestamp=datetime.fromtimestamp(first_chunk.created, tz=timezone.utc),
         )
@@ -412,6 +414,7 @@ class HuggingFaceStreamedResponse(StreamedResponse):
     """Implementation of `StreamedResponse` for Hugging Face models."""

     _model_name: str
+    _model_profile: ModelProfile
     _response: AsyncIterable[ChatCompletionStreamOutput]
     _timestamp: datetime

@@ -428,7 +431,9 @@
             content = choice.delta.content
             if content:
                 maybe_event = self._parts_manager.handle_text_delta(
-                    vendor_part_id='content',
+                    vendor_part_id='content',
+                    content=content,
+                    thinking_tags=self._model_profile.thinking_tags,
                 )
                 if maybe_event is not None:  # pragma: no branch
                     yield maybe_event
pydantic_ai/models/mistral.py
CHANGED
@@ -333,7 +333,7 @@ class MistralModel(Model):

         parts: list[ModelResponsePart] = []
         if text := _map_content(content):
-            parts.extend(split_content_into_text_and_thinking(text))
+            parts.extend(split_content_into_text_and_thinking(text, self.profile.thinking_tags))

         if isinstance(tool_calls, list):
             for tool_call in tool_calls:
pydantic_ai/models/openai.py
CHANGED
@@ -37,7 +37,7 @@ from ..messages import (
     UserPromptPart,
     VideoUrl,
 )
-from ..profiles import ModelProfileSpec
+from ..profiles import ModelProfile, ModelProfileSpec
 from ..settings import ModelSettings
 from ..tools import ToolDefinition
 from . import (
@@ -407,7 +407,7 @@ class OpenAIModel(Model):
         }

         if choice.message.content is not None:
-            items.extend(split_content_into_text_and_thinking(choice.message.content))
+            items.extend(split_content_into_text_and_thinking(choice.message.content, self.profile.thinking_tags))
         if choice.message.tool_calls is not None:
             for c in choice.message.tool_calls:
                 part = ToolCallPart(c.function.name, c.function.arguments, tool_call_id=c.id)
@@ -433,6 +433,7 @@

         return OpenAIStreamedResponse(
             _model_name=self._model_name,
+            _model_profile=self.profile,
             _response=peekable_response,
             _timestamp=number_to_datetime(first_chunk.created),
         )
@@ -1009,6 +1010,7 @@ class OpenAIStreamedResponse(StreamedResponse):
     """Implementation of `StreamedResponse` for OpenAI models."""

     _model_name: OpenAIModelName
+    _model_profile: ModelProfile
     _response: AsyncIterable[ChatCompletionChunk]
     _timestamp: datetime

@@ -1025,7 +1027,9 @@
             content = choice.delta.content
             if content:
                 maybe_event = self._parts_manager.handle_text_delta(
-                    vendor_part_id='content',
+                    vendor_part_id='content',
+                    content=content,
+                    thinking_tags=self._model_profile.thinking_tags,
                 )
                 if maybe_event is not None:  # pragma: no branch
                     yield maybe_event
pydantic_ai/models/test.py
CHANGED
@@ -123,7 +123,9 @@ class TestModel(Model):

         model_response = self._request(messages, model_settings, model_request_parameters)
         yield TestStreamedResponse(
-            _model_name=self._model_name,
+            _model_name=self._model_name,
+            _structured_response=model_response,
+            _messages=messages,
         )

     @property
pydantic_ai/profiles/__init__.py
CHANGED
@@ -35,6 +35,9 @@ class ModelProfile:
     json_schema_transformer: type[JsonSchemaTransformer] | None = None
     """The transformer to use to make JSON schemas for tools and structured output compatible with the model."""

+    thinking_tags: tuple[str, str] = ('<think>', '</think>')
+    """The tags used to indicate thinking parts in the model's output. Defaults to ('<think>', '</think>')."""
+
     @classmethod
     def from_profile(cls, profile: ModelProfile | None) -> Self:
         """Build a ModelProfile subclass instance from a ModelProfile instance."""
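Since `thinking_tags` is an ordinary `ModelProfile` field with a default, a profile for a model that marks its reasoning differently can simply override it; the splitting sketched earlier then uses those tags. A minimal example:

```python
from pydantic_ai.profiles import ModelProfile

# The default added in this release:
print(ModelProfile().thinking_tags)  # ('<think>', '</think>')

# A profile for a hypothetical model that wraps its reasoning in other markers:
custom = ModelProfile(thinking_tags=('<reasoning>', '</reasoning>'))
print(custom.thinking_tags)
```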
pydantic_ai/profiles/openai.py
CHANGED
@@ -47,11 +47,6 @@ def openai_model_profile(model_name: str) -> ModelProfile:
 _STRICT_INCOMPATIBLE_KEYS = [
     'minLength',
     'maxLength',
-    'pattern',
-    'format',
-    'minimum',
-    'maximum',
-    'multipleOf',
     'patternProperties',
     'unevaluatedProperties',
     'propertyNames',
@@ -61,11 +56,21 @@ _STRICT_INCOMPATIBLE_KEYS = [
     'contains',
     'minContains',
     'maxContains',
-    'minItems',
-    'maxItems',
     'uniqueItems',
 ]

+_STRICT_COMPATIBLE_STRING_FORMATS = [
+    'date-time',
+    'time',
+    'date',
+    'duration',
+    'email',
+    'hostname',
+    'ipv4',
+    'ipv6',
+    'uuid',
+]
+
 _sentinel = object()


@@ -127,6 +132,9 @@ class OpenAIJsonSchemaTransformer(JsonSchemaTransformer):
             value = schema.get(key, _sentinel)
             if value is not _sentinel:
                 incompatible_values[key] = value
+        if format := schema.get('format'):
+            if format not in _STRICT_COMPATIBLE_STRING_FORMATS:
+                incompatible_values['format'] = format
         description = schema.get('description')
         if incompatible_values:
             if self.strict is True:
@@ -158,11 +166,13 @@ class OpenAIJsonSchemaTransformer(JsonSchemaTransformer):
                 schema['required'] = list(schema['properties'].keys())

         elif self.strict is None:
-            if (
-
-
-
-
+            if schema.get('additionalProperties', None) not in (None, False):
+                self.is_strict_compatible = False
+            else:
+                # additional properties are disallowed by default
+                schema['additionalProperties'] = False
+
+            if 'properties' not in schema or 'required' not in schema:
                 self.is_strict_compatible = False
             else:
                 required = schema['required']
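Taken together, the `profiles/openai.py` changes mean `format` is no longer unconditionally strict-incompatible: only string formats outside the new whitelist are flagged, and keys like `pattern`, `minimum`/`maximum`, `multipleOf` and `minItems`/`maxItems` were dropped from the incompatible-key list altogether. A standalone sketch of the whitelist check (not the transformer's real API surface):

```python
from __future__ import annotations

# Mirrors the whitelist added in this diff.
_STRICT_COMPATIBLE_STRING_FORMATS = [
    'date-time', 'time', 'date', 'duration', 'email', 'hostname', 'ipv4', 'ipv6', 'uuid',
]


def incompatible_format(schema: dict) -> str | None:
    """Return the schema's `format` if it would break OpenAI strict mode, else None."""
    if fmt := schema.get('format'):
        if fmt not in _STRICT_COMPATIBLE_STRING_FORMATS:
            return fmt
    return None


print(incompatible_format({'type': 'string', 'format': 'uuid'}))    # None -> still strict-compatible
print(incompatible_format({'type': 'string', 'format': 'binary'}))  # 'binary' -> treated as incompatible
```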
pydantic_ai/tools.py
CHANGED
@@ -133,11 +133,19 @@ A = TypeVar('A')

 class GenerateToolJsonSchema(GenerateJsonSchema):
     def typed_dict_schema(self, schema: core_schema.TypedDictSchema) -> JsonSchemaValue:
-
-
-        if 'additionalProperties' not in
-
-
+        json_schema = super().typed_dict_schema(schema)
+        # Workaround for https://github.com/pydantic/pydantic/issues/12123
+        if 'additionalProperties' not in json_schema:  # pragma: no branch
+            extra = schema.get('extra_behavior') or schema.get('config', {}).get('extra_fields_behavior')
+            if extra == 'allow':
+                extras_schema = schema.get('extras_schema', None)
+                if extras_schema is not None:
+                    json_schema['additionalProperties'] = self.generate_inner(extras_schema) or True
+                else:
+                    json_schema['additionalProperties'] = True  # pragma: no cover
+            elif extra == 'forbid':
+                json_schema['additionalProperties'] = False
+        return json_schema

     def _named_required_fields_schema(self, named_required_fields: Sequence[tuple[str, bool, Any]]) -> JsonSchemaValue:
         # Remove largely-useless property titles
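The `GenerateToolJsonSchema` workaround above copies a TypedDict's extra-fields behaviour into `additionalProperties` when pydantic leaves the key unset. A hedged usage sketch (whether the base `GenerateJsonSchema` already sets the key depends on the pydantic version, which is what the `# pragma: no branch` hints at):

```python
from pydantic import ConfigDict, TypeAdapter
from typing_extensions import TypedDict

from pydantic_ai.tools import GenerateToolJsonSchema


class Point(TypedDict):
    """A TypedDict whose config forbids unknown keys."""

    __pydantic_config__ = ConfigDict(extra='forbid')
    x: int
    y: int


schema = TypeAdapter(Point).json_schema(schema_generator=GenerateToolJsonSchema)
# With the workaround, the forbidden extras are reflected in the tool schema.
print(schema.get('additionalProperties'))  # expected: False
```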
{pydantic_ai_slim-0.4.10.dist-info → pydantic_ai_slim-0.5.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.4.10
+Version: 0.5.0
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>, Douwe Maan <douwe@pydantic.dev>
 License-Expression: MIT
@@ -30,7 +30,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.4.10
+Requires-Dist: pydantic-graph==0.5.0
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -51,7 +51,7 @@ Requires-Dist: cohere>=5.16.0; (platform_system != 'Emscripten') and extra == 'c
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.4.10; extra == 'evals'
+Requires-Dist: pydantic-evals==0.5.0; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.24.0; extra == 'google'
 Provides-Extra: groq
{pydantic_ai_slim-0.4.10.dist-info → pydantic_ai_slim-0.5.0.dist-info}/RECORD
CHANGED
@@ -3,30 +3,30 @@ pydantic_ai/__main__.py,sha256=Q_zJU15DUA01YtlJ2mnaLCoId2YmgmreVEERGuQT-Y0,132
 pydantic_ai/_a2a.py,sha256=Tw_j9VRud0rLEk5kRs4GhRyhWYioXnsoZaTTyISq4M4,12126
 pydantic_ai/_agent_graph.py,sha256=-D_X7qyg4fHRbgR9ffKM_FU4KZ3qas-YgVvSmS0eXeI,37347
 pydantic_ai/_cli.py,sha256=YkiW2u9HGPd9fsgo9dpK1DZvtUPk4uXGQJcm75XgfhU,13250
-pydantic_ai/_function_schema.py,sha256=
+pydantic_ai/_function_schema.py,sha256=YFHxb6bKfhgeY6rNdbuYXgndGCDanveUx2258xkSNlQ,11233
 pydantic_ai/_griffe.py,sha256=Ugft16ZHw9CN_6-lW0Svn6jESK9zHXO_x4utkGBkbBI,5253
 pydantic_ai/_mcp.py,sha256=PuvwnlLjv7YYOa9AZJCrklevBug99zGMhwJCBGG7BHQ,5626
 pydantic_ai/_output.py,sha256=2k-nxfPNLJEb-wjnPhQo63lh-yQH1XsIhNG1hjsrim0,37462
-pydantic_ai/_parts_manager.py,sha256=
+pydantic_ai/_parts_manager.py,sha256=lWXN75zLy_MSDz4Wib65lqIPHk1SY8KDU8_OYaxG3yw,17788
 pydantic_ai/_run_context.py,sha256=pqb_HPXytE1Z9zZRRuBboRYes_tVTC75WGTpZgnb2Ko,1691
 pydantic_ai/_system_prompt.py,sha256=lUSq-gDZjlYTGtd6BUm54yEvTIvgdwBmJ8mLsNZZtYU,1142
-pydantic_ai/_thinking_part.py,sha256=
+pydantic_ai/_thinking_part.py,sha256=x80-Vkon16GOyq3W6f2qzafTVPC5dCgF7QD3k8ZMmYU,1304
 pydantic_ai/_tool_manager.py,sha256=BdjPntbSshNvYVpYZUNxb-yib5n4GPqcDinbNpzhBVo,8960
 pydantic_ai/_utils.py,sha256=0Pte4mjir4YFZJTa6i-H6Cra9NbVwSKjOKegArzUggk,16283
-pydantic_ai/ag_ui.py,sha256=
-pydantic_ai/agent.py,sha256=
+pydantic_ai/ag_ui.py,sha256=snIBVMcUUm3WWZ5P5orikyAzvM-7vGunNMgIudhvK-A,26156
+pydantic_ai/agent.py,sha256=VJOvadfilVm60BxWqxtXrzmNTnN8tuhapvFk2r13RO4,107234
 pydantic_ai/direct.py,sha256=WRfgke3zm-eeR39LTuh9XI2TrdHXAqO81eDvFwih4Ko,14803
 pydantic_ai/exceptions.py,sha256=o0l6fBrWI5UhosICVZ2yaT-JEJF05eqBlKlQCW8i9UM,3462
 pydantic_ai/format_as_xml.py,sha256=IINfh1evWDphGahqHNLBArB5dQ4NIqS3S-kru35ztGg,372
 pydantic_ai/format_prompt.py,sha256=Or-Ytq55RQb1UJqy2HKIyPpZ-knWXfdDP3Z6tNc6Orw,4244
 pydantic_ai/mcp.py,sha256=v_f4CRzJ399uPC96aqTiEzpaYvuo6vIQyLIpXQBudsY,26271
-pydantic_ai/messages.py,sha256=
+pydantic_ai/messages.py,sha256=OfaGM4AfMNTQJOCNJWDB1dVtMlLhPh2vzufCOPx2vjI,43767
 pydantic_ai/output.py,sha256=54Cwd1RruXlA5hucZ1h-SxFrzKHJuLvYvLtH9iyg2GI,11988
 pydantic_ai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/result.py,sha256=bFo2oQZiSw1wvpa6k3rZ3ae2KhzoiQno0F8glKWpvgg,25328
 pydantic_ai/retries.py,sha256=Xkj-gZAd3wc12CVsIErVYx2EIdIwD5yJOL4Ou6jDQ2s,10498
 pydantic_ai/settings.py,sha256=yuUZ7-GkdPB-Gbx71kSdh8dSr6gwM9gEwk84qNxPO_I,3552
-pydantic_ai/tools.py,sha256=
+pydantic_ai/tools.py,sha256=4UNpllVugXaJWLlehVgjoOyM-r4jy4vJ7uSUSbsPBiQ,14765
 pydantic_ai/usage.py,sha256=ceC9HHflyM_rkLBJqtaWPc-M8bEoq5rZF4XwGblPQoU,5830
 pydantic_ai/common_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/common_tools/duckduckgo.py,sha256=aQsm7zKuoRNgPM8ltbdyj8dPkREEkQenimsf_laF6kc,2245
@@ -34,26 +34,26 @@ pydantic_ai/common_tools/tavily.py,sha256=Q1xxSF5HtXAaZ10Pp-OaDOHXwJf2mco9wScGEQ
 pydantic_ai/ext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/ext/aci.py,sha256=vUaNIj6pRM52x6RkPW_DohSYxJPm75pPUfOMw2i5Xx0,2515
 pydantic_ai/ext/langchain.py,sha256=GemxfhpyG1JPxj69PbRiSJANnY8Q5s4hSB7wqt-uTbo,2266
-pydantic_ai/models/__init__.py,sha256=
+pydantic_ai/models/__init__.py,sha256=UDi-zXjRt_Zb8kaN5OKMxGXnJtDkROpfa66tSz_wNdI,30884
 pydantic_ai/models/anthropic.py,sha256=dMPFqIeYCIhoeU_4uk9PmZYQWL1NbkSVmVrBKXplTiI,24167
 pydantic_ai/models/bedrock.py,sha256=O6wKZDu4L18L1L2Nsa-XMW4ch073FjcLKRA5t_NXcHU,29511
-pydantic_ai/models/cohere.py,sha256=
+pydantic_ai/models/cohere.py,sha256=GYhQ6jkCYDHf3ca1835aig9o59XTvsyw4ISAVThYejA,12828
 pydantic_ai/models/fallback.py,sha256=URaV-dTQWkg99xrlkmknue5lXZWDcEt7cJ1Vsky4oB4,5130
 pydantic_ai/models/function.py,sha256=iHhG6GYN14XDo3_qbdliv_umY10B7-k11aoDoVF4xP8,13563
-pydantic_ai/models/gemini.py,sha256=
-pydantic_ai/models/google.py,sha256=
-pydantic_ai/models/groq.py,sha256=
-pydantic_ai/models/huggingface.py,sha256=
+pydantic_ai/models/gemini.py,sha256=J-05fngctXSqk3NzLaemt0h6r3S6jmr9ArvlWQE5Q0A,38124
+pydantic_ai/models/google.py,sha256=PNN5Z5VYPioT0-FzS4PoZ33es26AfUqwMBLfHhrElnw,24380
+pydantic_ai/models/groq.py,sha256=JX3Hi8tUJTsTj2A6CGoDhpW4IwNUxgOk0Ta58OCEL_A,19035
+pydantic_ai/models/huggingface.py,sha256=g4Z2C_e_OddYyGKLSOtP4nCL-AbWxmOdkW4zFcFtLq0,19222
 pydantic_ai/models/instrumented.py,sha256=aqvzspcGexn1Molbu6Mn4EEPRBSoQCCCS_yknJvJJ-8,16205
 pydantic_ai/models/mcp_sampling.py,sha256=q9nnjNEAAbhrfRc_Qw5z9TtCHMG_SwlCWW9FvKWjh8k,3395
-pydantic_ai/models/mistral.py,sha256=
-pydantic_ai/models/openai.py,sha256=
-pydantic_ai/models/test.py,sha256=
+pydantic_ai/models/mistral.py,sha256=bj56Meckuji8r4vowiFJMDSli-HZktocqSqtbzgpXa0,31455
+pydantic_ai/models/openai.py,sha256=Soqb7kZpQLBS6En7hVlhzBMlS07rjISJ9IlH96bBBBU,56122
+pydantic_ai/models/test.py,sha256=lGMblastixKF_f5MhP3TcvLWx7jj94H4ohmL7DMpdGo,18482
 pydantic_ai/models/wrapper.py,sha256=A5-ncYhPF8c9S_czGoXkd55s2KOQb65p3jbVpwZiFPA,2043
-pydantic_ai/profiles/__init__.py,sha256=
+pydantic_ai/profiles/__init__.py,sha256=uC1_64Pb0O1IMt_SwzvU3W7a2_T3pvdoSDcm8_WI7hw,2592
 pydantic_ai/profiles/_json_schema.py,sha256=sTNHkaK0kbwmbldZp9JRGQNax0f5Qvwy0HkWuu_nGxU,7179
 pydantic_ai/profiles/amazon.py,sha256=O4ijm1Lpz01vaSiHrkSeGQhbCKV5lyQVtHYqh0pCW_k,339
-pydantic_ai/profiles/anthropic.py,sha256=
+pydantic_ai/profiles/anthropic.py,sha256=J9N46G8eOjHdQ5CwZSLiwGdPb0eeIMdsMjwosDpvNhI,275
 pydantic_ai/profiles/cohere.py,sha256=lcL34Ht1jZopwuqoU6OV9l8vN4zwF-jiPjlsEABbSRo,215
 pydantic_ai/profiles/deepseek.py,sha256=DS_idprnXpMliKziKF0k1neLDJOwUvpatZ3YLaiYnCM,219
 pydantic_ai/profiles/google.py,sha256=cd5zwtx0MU1Xwm8c-oqi2_OJ2-PMJ8Vy23mxvSJF7ik,4856
@@ -61,7 +61,7 @@ pydantic_ai/profiles/grok.py,sha256=nBOxOCYCK9aiLmz2Q-esqYhotNbbBC1boAoOYIk1tVw,
 pydantic_ai/profiles/meta.py,sha256=IAGPoUrLWd-g9ajAgpWp9fIeOrP-7dBlZ2HEFjIhUbY,334
 pydantic_ai/profiles/mistral.py,sha256=ll01PmcK3szwlTfbaJLQmfd0TADN8lqjov9HpPJzCMQ,217
 pydantic_ai/profiles/moonshotai.py,sha256=LL5RacKHKn6rdvhoKjpGgZ8aVriv5NMeL6HCWEANAiU,223
-pydantic_ai/profiles/openai.py,sha256=
+pydantic_ai/profiles/openai.py,sha256=80ffJK9T7YbBRR7mhyRkjDB29pYi2H4dIQOAmIr5Ppc,7530
 pydantic_ai/profiles/qwen.py,sha256=u7pL8uomoQTVl45g5wDrHx0P_oFDLaN6ALswuwmkWc0,334
 pydantic_ai/providers/__init__.py,sha256=6Jm4ioGiI5jcwKUC2Yxv-GHdrK3ZTJmb-9_eHBZgfdw,4005
 pydantic_ai/providers/anthropic.py,sha256=D35UXxCPXv8yIbD0fj9Zg2FvNyoMoJMeDUtVM8Sn78I,3046
@@ -94,8 +94,8 @@ pydantic_ai/toolsets/prefixed.py,sha256=MIStkzUdiU0rk2Y6P19IrTBxspH5pTstGxsqCBt-
 pydantic_ai/toolsets/prepared.py,sha256=Zjfz6S8In6PBVxoKFN9sKPN984zO6t0awB7Lnq5KODw,1431
 pydantic_ai/toolsets/renamed.py,sha256=JuLHpi-hYPiSPlaTpN8WiXLiGsywYK0axi2lW2Qs75k,1637
 pydantic_ai/toolsets/wrapper.py,sha256=WjLoiM1WDuffSJ4mDS6pZrEZGHgZ421fjrqFcB66W94,1205
-pydantic_ai_slim-0.
-pydantic_ai_slim-0.
-pydantic_ai_slim-0.
-pydantic_ai_slim-0.
-pydantic_ai_slim-0.
+pydantic_ai_slim-0.5.0.dist-info/METADATA,sha256=LGb7rsYjr9K6qD8sAP7-kSPjcTIWnL6x_reSXWeYmTQ,4173
+pydantic_ai_slim-0.5.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pydantic_ai_slim-0.5.0.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
+pydantic_ai_slim-0.5.0.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
+pydantic_ai_slim-0.5.0.dist-info/RECORD,,
{pydantic_ai_slim-0.4.10.dist-info → pydantic_ai_slim-0.5.0.dist-info}/WHEEL
File without changes
{pydantic_ai_slim-0.4.10.dist-info → pydantic_ai_slim-0.5.0.dist-info}/entry_points.txt
File without changes
{pydantic_ai_slim-0.4.10.dist-info → pydantic_ai_slim-0.5.0.dist-info}/licenses/LICENSE
File without changes
|