pydantic-ai-slim 1.0.10__py3-none-any.whl → 1.0.12__py3-none-any.whl
This diff shows the changes between publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
Potentially problematic release: this version of pydantic-ai-slim has been flagged as possibly containing issues. See the release advisory for details.
- pydantic_ai/__init__.py +134 -4
- pydantic_ai/_a2a.py +1 -1
- pydantic_ai/_function_schema.py +18 -10
- pydantic_ai/{profiles/_json_schema.py → _json_schema.py} +5 -3
- pydantic_ai/_output.py +1 -8
- pydantic_ai/_thinking_part.py +1 -1
- pydantic_ai/_utils.py +24 -7
- pydantic_ai/agent/__init__.py +1 -2
- pydantic_ai/builtin_tools.py +20 -1
- pydantic_ai/common_tools/duckduckgo.py +2 -2
- pydantic_ai/common_tools/tavily.py +2 -2
- pydantic_ai/direct.py +4 -4
- pydantic_ai/durable_exec/dbos/_agent.py +1 -1
- pydantic_ai/durable_exec/dbos/_mcp_server.py +1 -2
- pydantic_ai/durable_exec/dbos/_model.py +2 -2
- pydantic_ai/durable_exec/temporal/_agent.py +1 -1
- pydantic_ai/durable_exec/temporal/_function_toolset.py +1 -1
- pydantic_ai/durable_exec/temporal/_mcp_server.py +1 -1
- pydantic_ai/durable_exec/temporal/_model.py +3 -3
- pydantic_ai/durable_exec/temporal/_toolset.py +1 -3
- pydantic_ai/ext/aci.py +1 -1
- pydantic_ai/ext/langchain.py +1 -1
- pydantic_ai/mcp.py +21 -7
- pydantic_ai/messages.py +16 -11
- pydantic_ai/models/__init__.py +3 -82
- pydantic_ai/models/anthropic.py +36 -23
- pydantic_ai/models/bedrock.py +6 -5
- pydantic_ai/models/google.py +2 -2
- pydantic_ai/models/instrumented.py +27 -11
- pydantic_ai/models/openai.py +115 -33
- pydantic_ai/output.py +23 -2
- pydantic_ai/profiles/__init__.py +1 -1
- pydantic_ai/profiles/google.py +1 -1
- pydantic_ai/profiles/harmony.py +3 -1
- pydantic_ai/profiles/openai.py +1 -1
- pydantic_ai/providers/__init__.py +1 -1
- pydantic_ai/providers/anthropic.py +1 -1
- pydantic_ai/providers/azure.py +1 -1
- pydantic_ai/providers/bedrock.py +1 -1
- pydantic_ai/providers/cerebras.py +1 -1
- pydantic_ai/providers/cohere.py +1 -1
- pydantic_ai/providers/deepseek.py +1 -1
- pydantic_ai/providers/fireworks.py +1 -1
- pydantic_ai/providers/github.py +1 -1
- pydantic_ai/providers/google.py +1 -1
- pydantic_ai/providers/google_gla.py +1 -1
- pydantic_ai/providers/google_vertex.py +1 -1
- pydantic_ai/providers/grok.py +1 -1
- pydantic_ai/providers/groq.py +1 -1
- pydantic_ai/providers/heroku.py +1 -1
- pydantic_ai/providers/huggingface.py +1 -1
- pydantic_ai/providers/litellm.py +1 -1
- pydantic_ai/providers/mistral.py +1 -1
- pydantic_ai/providers/moonshotai.py +1 -1
- pydantic_ai/providers/ollama.py +3 -1
- pydantic_ai/providers/openai.py +1 -1
- pydantic_ai/providers/openrouter.py +1 -1
- pydantic_ai/providers/together.py +1 -1
- pydantic_ai/providers/vercel.py +1 -1
- pydantic_ai/toolsets/function.py +1 -2
- {pydantic_ai_slim-1.0.10.dist-info → pydantic_ai_slim-1.0.12.dist-info}/METADATA +6 -6
- pydantic_ai_slim-1.0.12.dist-info/RECORD +127 -0
- pydantic_ai_slim-1.0.10.dist-info/RECORD +0 -127
- {pydantic_ai_slim-1.0.10.dist-info → pydantic_ai_slim-1.0.12.dist-info}/WHEEL +0 -0
- {pydantic_ai_slim-1.0.10.dist-info → pydantic_ai_slim-1.0.12.dist-info}/entry_points.txt +0 -0
- {pydantic_ai_slim-1.0.10.dist-info → pydantic_ai_slim-1.0.12.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/models/openai.py
CHANGED
|
@@ -519,6 +519,10 @@ class OpenAIChatModel(Model):
|
|
|
519
519
|
timestamp = _now_utc()
|
|
520
520
|
response.created = int(timestamp.timestamp())
|
|
521
521
|
|
|
522
|
+
# Workaround for local Ollama which sometimes returns a `None` finish reason.
|
|
523
|
+
if response.choices and (choice := response.choices[0]) and choice.finish_reason is None: # pyright: ignore[reportUnnecessaryComparison]
|
|
524
|
+
choice.finish_reason = 'stop'
|
|
525
|
+
|
|
522
526
|
try:
|
|
523
527
|
response = chat.ChatCompletion.model_validate(response.model_dump())
|
|
524
528
|
except ValidationError as e:
|
|
@@ -526,16 +530,20 @@ class OpenAIChatModel(Model):
|
|
|
526
530
|
|
|
527
531
|
choice = response.choices[0]
|
|
528
532
|
items: list[ModelResponsePart] = []
|
|
533
|
+
|
|
529
534
|
# The `reasoning_content` field is only present in DeepSeek models.
|
|
530
535
|
# https://api-docs.deepseek.com/guides/reasoning_model
|
|
531
536
|
if reasoning_content := getattr(choice.message, 'reasoning_content', None):
|
|
532
537
|
items.append(ThinkingPart(id='reasoning_content', content=reasoning_content, provider_name=self.system))
|
|
533
538
|
|
|
534
|
-
#
|
|
535
|
-
# - https://openrouter.ai/docs/use-cases/reasoning-tokens#preserving-reasoning-blocks
|
|
536
|
-
# NOTE: We don't currently handle OpenRouter/gpt-oss `reasoning`:
|
|
539
|
+
# The `reasoning` field is only present in gpt-oss via Ollama and OpenRouter.
|
|
537
540
|
# - https://cookbook.openai.com/articles/gpt-oss/handle-raw-cot#chat-completions-api
|
|
538
541
|
# - https://openrouter.ai/docs/use-cases/reasoning-tokens#basic-usage-with-reasoning-tokens
|
|
542
|
+
if reasoning := getattr(choice.message, 'reasoning', None):
|
|
543
|
+
items.append(ThinkingPart(id='reasoning', content=reasoning, provider_name=self.system))
|
|
544
|
+
|
|
545
|
+
# NOTE: We don't currently handle OpenRouter `reasoning_details`:
|
|
546
|
+
# - https://openrouter.ai/docs/use-cases/reasoning-tokens#preserving-reasoning-blocks
|
|
539
547
|
# If you need this, please file an issue.
|
|
540
548
|
|
|
541
549
|
vendor_details: dict[str, Any] = {}
|
|
@@ -742,8 +750,7 @@ class OpenAIChatModel(Model):
|
|
|
742
750
|
else:
|
|
743
751
|
assert_never(part)
|
|
744
752
|
|
|
745
|
-
|
|
746
|
-
async def _map_user_prompt(part: UserPromptPart) -> chat.ChatCompletionUserMessageParam:
|
|
753
|
+
async def _map_user_prompt(self, part: UserPromptPart) -> chat.ChatCompletionUserMessageParam: # noqa: C901
|
|
747
754
|
content: str | list[ChatCompletionContentPartParam]
|
|
748
755
|
if isinstance(part.content, str):
|
|
749
756
|
content = part.content
|
|
@@ -753,29 +760,45 @@ class OpenAIChatModel(Model):
|
|
|
753
760
|
if isinstance(item, str):
|
|
754
761
|
content.append(ChatCompletionContentPartTextParam(text=item, type='text'))
|
|
755
762
|
elif isinstance(item, ImageUrl):
|
|
756
|
-
image_url =
|
|
763
|
+
image_url: ImageURL = {'url': item.url}
|
|
764
|
+
if metadata := item.vendor_metadata:
|
|
765
|
+
image_url['detail'] = metadata.get('detail', 'auto')
|
|
757
766
|
content.append(ChatCompletionContentPartImageParam(image_url=image_url, type='image_url'))
|
|
758
767
|
elif isinstance(item, BinaryContent):
|
|
759
|
-
|
|
760
|
-
|
|
761
|
-
image_url = ImageURL(url=f'data:{item.media_type};base64,{base64_encoded}')
|
|
762
|
-
content.append(ChatCompletionContentPartImageParam(image_url=image_url, type='image_url'))
|
|
763
|
-
elif item.is_audio:
|
|
764
|
-
assert item.format in ('wav', 'mp3')
|
|
765
|
-
audio = InputAudio(data=base64_encoded, format=item.format)
|
|
766
|
-
content.append(ChatCompletionContentPartInputAudioParam(input_audio=audio, type='input_audio'))
|
|
767
|
-
elif item.is_document:
|
|
768
|
+
if self._is_text_like_media_type(item.media_type):
|
|
769
|
+
# Inline text-like binary content as a text block
|
|
768
770
|
content.append(
|
|
769
|
-
|
|
770
|
-
|
|
771
|
-
|
|
772
|
-
|
|
773
|
-
),
|
|
774
|
-
type='file',
|
|
771
|
+
self._inline_text_file_part(
|
|
772
|
+
item.data.decode('utf-8'),
|
|
773
|
+
media_type=item.media_type,
|
|
774
|
+
identifier=item.identifier,
|
|
775
775
|
)
|
|
776
776
|
)
|
|
777
|
-
else:
|
|
778
|
-
|
|
777
|
+
else:
|
|
778
|
+
base64_encoded = base64.b64encode(item.data).decode('utf-8')
|
|
779
|
+
if item.is_image:
|
|
780
|
+
image_url: ImageURL = {'url': f'data:{item.media_type};base64,{base64_encoded}'}
|
|
781
|
+
if metadata := item.vendor_metadata:
|
|
782
|
+
image_url['detail'] = metadata.get('detail', 'auto')
|
|
783
|
+
content.append(ChatCompletionContentPartImageParam(image_url=image_url, type='image_url'))
|
|
784
|
+
elif item.is_audio:
|
|
785
|
+
assert item.format in ('wav', 'mp3')
|
|
786
|
+
audio = InputAudio(data=base64_encoded, format=item.format)
|
|
787
|
+
content.append(
|
|
788
|
+
ChatCompletionContentPartInputAudioParam(input_audio=audio, type='input_audio')
|
|
789
|
+
)
|
|
790
|
+
elif item.is_document:
|
|
791
|
+
content.append(
|
|
792
|
+
File(
|
|
793
|
+
file=FileFile(
|
|
794
|
+
file_data=f'data:{item.media_type};base64,{base64_encoded}',
|
|
795
|
+
filename=f'filename.{item.format}',
|
|
796
|
+
),
|
|
797
|
+
type='file',
|
|
798
|
+
)
|
|
799
|
+
)
|
|
800
|
+
else: # pragma: no cover
|
|
801
|
+
raise RuntimeError(f'Unsupported binary content type: {item.media_type}')
|
|
779
802
|
elif isinstance(item, AudioUrl):
|
|
780
803
|
downloaded_item = await download_item(item, data_format='base64', type_format='extension')
|
|
781
804
|
assert downloaded_item['data_type'] in (
|
|
@@ -785,20 +808,54 @@ class OpenAIChatModel(Model):
|
|
|
785
808
|
audio = InputAudio(data=downloaded_item['data'], format=downloaded_item['data_type'])
|
|
786
809
|
content.append(ChatCompletionContentPartInputAudioParam(input_audio=audio, type='input_audio'))
|
|
787
810
|
elif isinstance(item, DocumentUrl):
|
|
788
|
-
|
|
789
|
-
|
|
790
|
-
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
811
|
+
if self._is_text_like_media_type(item.media_type):
|
|
812
|
+
downloaded_text = await download_item(item, data_format='text')
|
|
813
|
+
content.append(
|
|
814
|
+
self._inline_text_file_part(
|
|
815
|
+
downloaded_text['data'],
|
|
816
|
+
media_type=item.media_type,
|
|
817
|
+
identifier=item.identifier,
|
|
818
|
+
)
|
|
819
|
+
)
|
|
820
|
+
else:
|
|
821
|
+
downloaded_item = await download_item(item, data_format='base64_uri', type_format='extension')
|
|
822
|
+
content.append(
|
|
823
|
+
File(
|
|
824
|
+
file=FileFile(
|
|
825
|
+
file_data=downloaded_item['data'],
|
|
826
|
+
filename=f'filename.{downloaded_item["data_type"]}',
|
|
827
|
+
),
|
|
828
|
+
type='file',
|
|
829
|
+
)
|
|
830
|
+
)
|
|
796
831
|
elif isinstance(item, VideoUrl): # pragma: no cover
|
|
797
832
|
raise NotImplementedError('VideoUrl is not supported for OpenAI')
|
|
798
833
|
else:
|
|
799
834
|
assert_never(item)
|
|
800
835
|
return chat.ChatCompletionUserMessageParam(role='user', content=content)
|
|
801
836
|
|
|
837
|
+
@staticmethod
|
|
838
|
+
def _is_text_like_media_type(media_type: str) -> bool:
|
|
839
|
+
return (
|
|
840
|
+
media_type.startswith('text/')
|
|
841
|
+
or media_type == 'application/json'
|
|
842
|
+
or media_type.endswith('+json')
|
|
843
|
+
or media_type == 'application/xml'
|
|
844
|
+
or media_type.endswith('+xml')
|
|
845
|
+
or media_type in ('application/x-yaml', 'application/yaml')
|
|
846
|
+
)
|
|
847
|
+
|
|
848
|
+
@staticmethod
|
|
849
|
+
def _inline_text_file_part(text: str, *, media_type: str, identifier: str) -> ChatCompletionContentPartTextParam:
|
|
850
|
+
text = '\n'.join(
|
|
851
|
+
[
|
|
852
|
+
f'-----BEGIN FILE id="{identifier}" type="{media_type}"-----',
|
|
853
|
+
text,
|
|
854
|
+
f'-----END FILE id="{identifier}"-----',
|
|
855
|
+
]
|
|
856
|
+
)
|
|
857
|
+
return ChatCompletionContentPartTextParam(text=text, type='text')
|
|
858
|
+
|
|
802
859
|
|
|
803
860
|
@deprecated(
|
|
804
861
|
'`OpenAIModel` was renamed to `OpenAIChatModel` to clearly distinguish it from `OpenAIResponsesModel` which '
|
|
@@ -1383,11 +1440,17 @@ class OpenAIResponsesModel(Model):
|
|
|
1383
1440
|
elif isinstance(item, BinaryContent):
|
|
1384
1441
|
base64_encoded = base64.b64encode(item.data).decode('utf-8')
|
|
1385
1442
|
if item.is_image:
|
|
1443
|
+
detail: Literal['auto', 'low', 'high'] = 'auto'
|
|
1444
|
+
if metadata := item.vendor_metadata:
|
|
1445
|
+
detail = cast(
|
|
1446
|
+
Literal['auto', 'low', 'high'],
|
|
1447
|
+
metadata.get('detail', 'auto'),
|
|
1448
|
+
)
|
|
1386
1449
|
content.append(
|
|
1387
1450
|
responses.ResponseInputImageParam(
|
|
1388
1451
|
image_url=f'data:{item.media_type};base64,{base64_encoded}',
|
|
1389
1452
|
type='input_image',
|
|
1390
|
-
detail=
|
|
1453
|
+
detail=detail,
|
|
1391
1454
|
)
|
|
1392
1455
|
)
|
|
1393
1456
|
elif item.is_document:
|
|
@@ -1406,8 +1469,15 @@ class OpenAIResponsesModel(Model):
|
|
|
1406
1469
|
else: # pragma: no cover
|
|
1407
1470
|
raise RuntimeError(f'Unsupported binary content type: {item.media_type}')
|
|
1408
1471
|
elif isinstance(item, ImageUrl):
|
|
1472
|
+
detail: Literal['auto', 'low', 'high'] = 'auto'
|
|
1473
|
+
if metadata := item.vendor_metadata:
|
|
1474
|
+
detail = cast(Literal['auto', 'low', 'high'], metadata.get('detail', 'auto'))
|
|
1409
1475
|
content.append(
|
|
1410
|
-
responses.ResponseInputImageParam(
|
|
1476
|
+
responses.ResponseInputImageParam(
|
|
1477
|
+
image_url=item.url,
|
|
1478
|
+
type='input_image',
|
|
1479
|
+
detail=detail,
|
|
1480
|
+
)
|
|
1411
1481
|
)
|
|
1412
1482
|
elif isinstance(item, AudioUrl): # pragma: no cover
|
|
1413
1483
|
downloaded_item = await download_item(item, data_format='base64_uri', type_format='extension')
|
|
@@ -1446,6 +1516,7 @@ class OpenAIStreamedResponse(StreamedResponse):
|
|
|
1446
1516
|
|
|
1447
1517
|
async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
|
|
1448
1518
|
async for chunk in self._response:
|
|
1519
|
+
print(chunk)
|
|
1449
1520
|
self._usage += _map_usage(chunk)
|
|
1450
1521
|
|
|
1451
1522
|
if chunk.id: # pragma: no branch
|
|
@@ -1492,6 +1563,17 @@ class OpenAIStreamedResponse(StreamedResponse):
|
|
|
1492
1563
|
provider_name=self.provider_name,
|
|
1493
1564
|
)
|
|
1494
1565
|
|
|
1566
|
+
# The `reasoning` field is only present in gpt-oss via Ollama and OpenRouter.
|
|
1567
|
+
# - https://cookbook.openai.com/articles/gpt-oss/handle-raw-cot#chat-completions-api
|
|
1568
|
+
# - https://openrouter.ai/docs/use-cases/reasoning-tokens#basic-usage-with-reasoning-tokens
|
|
1569
|
+
if reasoning := getattr(choice.delta, 'reasoning', None): # pragma: no cover
|
|
1570
|
+
yield self._parts_manager.handle_thinking_delta(
|
|
1571
|
+
vendor_part_id='reasoning',
|
|
1572
|
+
id='reasoning',
|
|
1573
|
+
content=reasoning,
|
|
1574
|
+
provider_name=self.provider_name,
|
|
1575
|
+
)
|
|
1576
|
+
|
|
1495
1577
|
for dtc in choice.delta.tool_calls or []:
|
|
1496
1578
|
maybe_event = self._parts_manager.handle_tool_call_delta(
|
|
1497
1579
|
vendor_part_id=dtc.index,
|
pydantic_ai/output.py
CHANGED
|
@@ -9,9 +9,10 @@ from pydantic.json_schema import JsonSchemaValue
|
|
|
9
9
|
from pydantic_core import core_schema
|
|
10
10
|
from typing_extensions import TypeAliasType, TypeVar, deprecated
|
|
11
11
|
|
|
12
|
-
from . import _utils
|
|
12
|
+
from . import _utils, exceptions
|
|
13
|
+
from ._json_schema import InlineDefsJsonSchemaTransformer
|
|
13
14
|
from .messages import ToolCallPart
|
|
14
|
-
from .tools import DeferredToolRequests, RunContext, ToolDefinition
|
|
15
|
+
from .tools import DeferredToolRequests, ObjectJsonSchema, RunContext, ToolDefinition
|
|
15
16
|
|
|
16
17
|
__all__ = (
|
|
17
18
|
# classes
|
|
@@ -20,6 +21,7 @@ __all__ = (
|
|
|
20
21
|
'PromptedOutput',
|
|
21
22
|
'TextOutput',
|
|
22
23
|
'StructuredDict',
|
|
24
|
+
'OutputObjectDefinition',
|
|
23
25
|
# types
|
|
24
26
|
'OutputDataT',
|
|
25
27
|
'OutputMode',
|
|
@@ -242,6 +244,16 @@ class PromptedOutput(Generic[OutputDataT]):
|
|
|
242
244
|
self.template = template
|
|
243
245
|
|
|
244
246
|
|
|
247
|
+
@dataclass
|
|
248
|
+
class OutputObjectDefinition:
|
|
249
|
+
"""Definition of an output object used for structured output generation."""
|
|
250
|
+
|
|
251
|
+
json_schema: ObjectJsonSchema
|
|
252
|
+
name: str | None = None
|
|
253
|
+
description: str | None = None
|
|
254
|
+
strict: bool | None = None
|
|
255
|
+
|
|
256
|
+
|
|
245
257
|
@dataclass
|
|
246
258
|
class TextOutput(Generic[OutputDataT]):
|
|
247
259
|
"""Marker class to use text output for an output function taking a string argument.
|
|
@@ -300,6 +312,15 @@ def StructuredDict(
|
|
|
300
312
|
"""
|
|
301
313
|
json_schema = _utils.check_object_json_schema(json_schema)
|
|
302
314
|
|
|
315
|
+
# Pydantic `TypeAdapter` fails when `object.__get_pydantic_json_schema__` has `$defs`, so we inline them
|
|
316
|
+
# See https://github.com/pydantic/pydantic/issues/12145
|
|
317
|
+
if '$defs' in json_schema:
|
|
318
|
+
json_schema = InlineDefsJsonSchemaTransformer(json_schema).walk()
|
|
319
|
+
if '$defs' in json_schema:
|
|
320
|
+
raise exceptions.UserError(
|
|
321
|
+
'`StructuredDict` does not currently support recursive `$ref`s and `$defs`. See https://github.com/pydantic/pydantic/issues/12145 for more information.'
|
|
322
|
+
)
|
|
323
|
+
|
|
303
324
|
if name:
|
|
304
325
|
json_schema['title'] = name
|
|
305
326
|
|
pydantic_ai/profiles/__init__.py
CHANGED
|
@@ -6,8 +6,8 @@ from textwrap import dedent
|
|
|
6
6
|
|
|
7
7
|
from typing_extensions import Self
|
|
8
8
|
|
|
9
|
+
from .._json_schema import InlineDefsJsonSchemaTransformer, JsonSchemaTransformer
|
|
9
10
|
from ..output import StructuredOutputMode
|
|
10
|
-
from ._json_schema import InlineDefsJsonSchemaTransformer, JsonSchemaTransformer
|
|
11
11
|
|
|
12
12
|
__all__ = [
|
|
13
13
|
'ModelProfile',
|
pydantic_ai/profiles/google.py
CHANGED
|
@@ -4,8 +4,8 @@ import warnings
|
|
|
4
4
|
|
|
5
5
|
from pydantic_ai.exceptions import UserError
|
|
6
6
|
|
|
7
|
+
from .._json_schema import JsonSchema, JsonSchemaTransformer
|
|
7
8
|
from . import ModelProfile
|
|
8
|
-
from ._json_schema import JsonSchema, JsonSchemaTransformer
|
|
9
9
|
|
|
10
10
|
|
|
11
11
|
def google_model_profile(model_name: str) -> ModelProfile | None:
|
pydantic_ai/profiles/harmony.py
CHANGED
|
@@ -10,4 +10,6 @@ def harmony_model_profile(model_name: str) -> ModelProfile | None:
|
|
|
10
10
|
See <https://cookbook.openai.com/articles/openai-harmony> for more details.
|
|
11
11
|
"""
|
|
12
12
|
profile = openai_model_profile(model_name)
|
|
13
|
-
return OpenAIModelProfile(
|
|
13
|
+
return OpenAIModelProfile(
|
|
14
|
+
openai_supports_tool_choice_required=False, ignore_streamed_leading_whitespace=True
|
|
15
|
+
).update(profile)
|
pydantic_ai/profiles/openai.py
CHANGED
|
@@ -6,8 +6,8 @@ from collections.abc import Sequence
|
|
|
6
6
|
from dataclasses import dataclass
|
|
7
7
|
from typing import Any, Literal
|
|
8
8
|
|
|
9
|
+
from .._json_schema import JsonSchema, JsonSchemaTransformer
|
|
9
10
|
from . import ModelProfile
|
|
10
|
-
from ._json_schema import JsonSchema, JsonSchemaTransformer
|
|
11
11
|
|
|
12
12
|
OpenAISystemPromptRole = Literal['system', 'developer', 'user']
|
|
13
13
|
|
|
@@ -8,7 +8,7 @@ from __future__ import annotations as _annotations
|
|
|
8
8
|
from abc import ABC, abstractmethod
|
|
9
9
|
from typing import Any, Generic, TypeVar
|
|
10
10
|
|
|
11
|
-
from pydantic_ai
|
|
11
|
+
from pydantic_ai import ModelProfile
|
|
12
12
|
|
|
13
13
|
InterfaceClient = TypeVar('InterfaceClient')
|
|
14
14
|
|
|
@@ -5,9 +5,9 @@ from typing import TypeAlias, overload
|
|
|
5
5
|
|
|
6
6
|
import httpx
|
|
7
7
|
|
|
8
|
+
from pydantic_ai import ModelProfile
|
|
8
9
|
from pydantic_ai.exceptions import UserError
|
|
9
10
|
from pydantic_ai.models import cached_async_http_client
|
|
10
|
-
from pydantic_ai.profiles import ModelProfile
|
|
11
11
|
from pydantic_ai.profiles.anthropic import anthropic_model_profile
|
|
12
12
|
from pydantic_ai.providers import Provider
|
|
13
13
|
|
pydantic_ai/providers/azure.py
CHANGED
|
@@ -6,9 +6,9 @@ from typing import overload
|
|
|
6
6
|
import httpx
|
|
7
7
|
from openai import AsyncOpenAI
|
|
8
8
|
|
|
9
|
+
from pydantic_ai import ModelProfile
|
|
9
10
|
from pydantic_ai.exceptions import UserError
|
|
10
11
|
from pydantic_ai.models import cached_async_http_client
|
|
11
|
-
from pydantic_ai.profiles import ModelProfile
|
|
12
12
|
from pydantic_ai.profiles.cohere import cohere_model_profile
|
|
13
13
|
from pydantic_ai.profiles.deepseek import deepseek_model_profile
|
|
14
14
|
from pydantic_ai.profiles.grok import grok_model_profile
|
pydantic_ai/providers/bedrock.py
CHANGED
|
@@ -6,8 +6,8 @@ from collections.abc import Callable
|
|
|
6
6
|
from dataclasses import dataclass
|
|
7
7
|
from typing import Literal, overload
|
|
8
8
|
|
|
9
|
+
from pydantic_ai import ModelProfile
|
|
9
10
|
from pydantic_ai.exceptions import UserError
|
|
10
|
-
from pydantic_ai.profiles import ModelProfile
|
|
11
11
|
from pydantic_ai.profiles.amazon import amazon_model_profile
|
|
12
12
|
from pydantic_ai.profiles.anthropic import anthropic_model_profile
|
|
13
13
|
from pydantic_ai.profiles.cohere import cohere_model_profile
|
|
@@ -5,9 +5,9 @@ from typing import overload
|
|
|
5
5
|
|
|
6
6
|
import httpx
|
|
7
7
|
|
|
8
|
+
from pydantic_ai import ModelProfile
|
|
8
9
|
from pydantic_ai.exceptions import UserError
|
|
9
10
|
from pydantic_ai.models import cached_async_http_client
|
|
10
|
-
from pydantic_ai.profiles import ModelProfile
|
|
11
11
|
from pydantic_ai.profiles.harmony import harmony_model_profile
|
|
12
12
|
from pydantic_ai.profiles.meta import meta_model_profile
|
|
13
13
|
from pydantic_ai.profiles.openai import OpenAIJsonSchemaTransformer, OpenAIModelProfile
|
pydantic_ai/providers/cohere.py
CHANGED
|
@@ -4,9 +4,9 @@ import os
|
|
|
4
4
|
|
|
5
5
|
import httpx
|
|
6
6
|
|
|
7
|
+
from pydantic_ai import ModelProfile
|
|
7
8
|
from pydantic_ai.exceptions import UserError
|
|
8
9
|
from pydantic_ai.models import cached_async_http_client
|
|
9
|
-
from pydantic_ai.profiles import ModelProfile
|
|
10
10
|
from pydantic_ai.profiles.cohere import cohere_model_profile
|
|
11
11
|
from pydantic_ai.providers import Provider
|
|
12
12
|
|
|
@@ -6,9 +6,9 @@ from typing import overload
|
|
|
6
6
|
import httpx
|
|
7
7
|
from openai import AsyncOpenAI
|
|
8
8
|
|
|
9
|
+
from pydantic_ai import ModelProfile
|
|
9
10
|
from pydantic_ai.exceptions import UserError
|
|
10
11
|
from pydantic_ai.models import cached_async_http_client
|
|
11
|
-
from pydantic_ai.profiles import ModelProfile
|
|
12
12
|
from pydantic_ai.profiles.deepseek import deepseek_model_profile
|
|
13
13
|
from pydantic_ai.profiles.openai import OpenAIJsonSchemaTransformer, OpenAIModelProfile
|
|
14
14
|
from pydantic_ai.providers import Provider
|
|
@@ -6,9 +6,9 @@ from typing import overload
|
|
|
6
6
|
import httpx
|
|
7
7
|
from openai import AsyncOpenAI
|
|
8
8
|
|
|
9
|
+
from pydantic_ai import ModelProfile
|
|
9
10
|
from pydantic_ai.exceptions import UserError
|
|
10
11
|
from pydantic_ai.models import cached_async_http_client
|
|
11
|
-
from pydantic_ai.profiles import ModelProfile
|
|
12
12
|
from pydantic_ai.profiles.deepseek import deepseek_model_profile
|
|
13
13
|
from pydantic_ai.profiles.google import google_model_profile
|
|
14
14
|
from pydantic_ai.profiles.meta import meta_model_profile
|
pydantic_ai/providers/github.py
CHANGED
|
@@ -5,9 +5,9 @@ from typing import overload
|
|
|
5
5
|
|
|
6
6
|
import httpx
|
|
7
7
|
|
|
8
|
+
from pydantic_ai import ModelProfile
|
|
8
9
|
from pydantic_ai.exceptions import UserError
|
|
9
10
|
from pydantic_ai.models import cached_async_http_client
|
|
10
|
-
from pydantic_ai.profiles import ModelProfile
|
|
11
11
|
from pydantic_ai.profiles.cohere import cohere_model_profile
|
|
12
12
|
from pydantic_ai.profiles.deepseek import deepseek_model_profile
|
|
13
13
|
from pydantic_ai.profiles.grok import grok_model_profile
|
pydantic_ai/providers/google.py
CHANGED
|
@@ -5,9 +5,9 @@ from typing import Literal, overload
|
|
|
5
5
|
|
|
6
6
|
import httpx
|
|
7
7
|
|
|
8
|
+
from pydantic_ai import ModelProfile
|
|
8
9
|
from pydantic_ai.exceptions import UserError
|
|
9
10
|
from pydantic_ai.models import get_user_agent
|
|
10
|
-
from pydantic_ai.profiles import ModelProfile
|
|
11
11
|
from pydantic_ai.profiles.google import google_model_profile
|
|
12
12
|
from pydantic_ai.providers import Provider
|
|
13
13
|
|
|
@@ -5,9 +5,9 @@ import os
|
|
|
5
5
|
import httpx
|
|
6
6
|
from typing_extensions import deprecated
|
|
7
7
|
|
|
8
|
+
from pydantic_ai import ModelProfile
|
|
8
9
|
from pydantic_ai.exceptions import UserError
|
|
9
10
|
from pydantic_ai.models import cached_async_http_client
|
|
10
|
-
from pydantic_ai.profiles import ModelProfile
|
|
11
11
|
from pydantic_ai.profiles.google import google_model_profile
|
|
12
12
|
from pydantic_ai.providers import Provider
|
|
13
13
|
|
|
@@ -10,9 +10,9 @@ import anyio.to_thread
|
|
|
10
10
|
import httpx
|
|
11
11
|
from typing_extensions import deprecated
|
|
12
12
|
|
|
13
|
+
from pydantic_ai import ModelProfile
|
|
13
14
|
from pydantic_ai.exceptions import UserError
|
|
14
15
|
from pydantic_ai.models import cached_async_http_client
|
|
15
|
-
from pydantic_ai.profiles import ModelProfile
|
|
16
16
|
from pydantic_ai.profiles.google import google_model_profile
|
|
17
17
|
from pydantic_ai.providers import Provider
|
|
18
18
|
|
pydantic_ai/providers/grok.py
CHANGED
|
@@ -6,9 +6,9 @@ from typing import Literal, overload
|
|
|
6
6
|
import httpx
|
|
7
7
|
from openai import AsyncOpenAI
|
|
8
8
|
|
|
9
|
+
from pydantic_ai import ModelProfile
|
|
9
10
|
from pydantic_ai.exceptions import UserError
|
|
10
11
|
from pydantic_ai.models import cached_async_http_client
|
|
11
|
-
from pydantic_ai.profiles import ModelProfile
|
|
12
12
|
from pydantic_ai.profiles.grok import grok_model_profile
|
|
13
13
|
from pydantic_ai.profiles.openai import OpenAIJsonSchemaTransformer, OpenAIModelProfile
|
|
14
14
|
from pydantic_ai.providers import Provider
|
pydantic_ai/providers/groq.py
CHANGED
|
@@ -5,9 +5,9 @@ from typing import overload
|
|
|
5
5
|
|
|
6
6
|
import httpx
|
|
7
7
|
|
|
8
|
+
from pydantic_ai import ModelProfile
|
|
8
9
|
from pydantic_ai.exceptions import UserError
|
|
9
10
|
from pydantic_ai.models import cached_async_http_client
|
|
10
|
-
from pydantic_ai.profiles import ModelProfile
|
|
11
11
|
from pydantic_ai.profiles.deepseek import deepseek_model_profile
|
|
12
12
|
from pydantic_ai.profiles.google import google_model_profile
|
|
13
13
|
from pydantic_ai.profiles.groq import groq_model_profile
|
pydantic_ai/providers/heroku.py
CHANGED
|
@@ -6,9 +6,9 @@ from typing import overload
|
|
|
6
6
|
import httpx
|
|
7
7
|
from openai import AsyncOpenAI
|
|
8
8
|
|
|
9
|
+
from pydantic_ai import ModelProfile
|
|
9
10
|
from pydantic_ai.exceptions import UserError
|
|
10
11
|
from pydantic_ai.models import cached_async_http_client
|
|
11
|
-
from pydantic_ai.profiles import ModelProfile
|
|
12
12
|
from pydantic_ai.profiles.openai import OpenAIJsonSchemaTransformer, OpenAIModelProfile
|
|
13
13
|
from pydantic_ai.providers import Provider
|
|
14
14
|
|
|
@@ -5,8 +5,8 @@ from typing import overload
|
|
|
5
5
|
|
|
6
6
|
from httpx import AsyncClient
|
|
7
7
|
|
|
8
|
+
from pydantic_ai import ModelProfile
|
|
8
9
|
from pydantic_ai.exceptions import UserError
|
|
9
|
-
from pydantic_ai.profiles import ModelProfile
|
|
10
10
|
from pydantic_ai.profiles.deepseek import deepseek_model_profile
|
|
11
11
|
from pydantic_ai.profiles.google import google_model_profile
|
|
12
12
|
from pydantic_ai.profiles.meta import meta_model_profile
|
pydantic_ai/providers/litellm.py
CHANGED
|
@@ -5,8 +5,8 @@ from typing import overload
|
|
|
5
5
|
from httpx import AsyncClient as AsyncHTTPClient
|
|
6
6
|
from openai import AsyncOpenAI
|
|
7
7
|
|
|
8
|
+
from pydantic_ai import ModelProfile
|
|
8
9
|
from pydantic_ai.models import cached_async_http_client
|
|
9
|
-
from pydantic_ai.profiles import ModelProfile
|
|
10
10
|
from pydantic_ai.profiles.amazon import amazon_model_profile
|
|
11
11
|
from pydantic_ai.profiles.anthropic import anthropic_model_profile
|
|
12
12
|
from pydantic_ai.profiles.cohere import cohere_model_profile
|
pydantic_ai/providers/mistral.py
CHANGED
|
@@ -5,9 +5,9 @@ from typing import overload
|
|
|
5
5
|
|
|
6
6
|
import httpx
|
|
7
7
|
|
|
8
|
+
from pydantic_ai import ModelProfile
|
|
8
9
|
from pydantic_ai.exceptions import UserError
|
|
9
10
|
from pydantic_ai.models import cached_async_http_client
|
|
10
|
-
from pydantic_ai.profiles import ModelProfile
|
|
11
11
|
from pydantic_ai.profiles.mistral import mistral_model_profile
|
|
12
12
|
from pydantic_ai.providers import Provider
|
|
13
13
|
|
|
@@ -6,9 +6,9 @@ from typing import Literal, overload
|
|
|
6
6
|
import httpx
|
|
7
7
|
from openai import AsyncOpenAI
|
|
8
8
|
|
|
9
|
+
from pydantic_ai import ModelProfile
|
|
9
10
|
from pydantic_ai.exceptions import UserError
|
|
10
11
|
from pydantic_ai.models import cached_async_http_client
|
|
11
|
-
from pydantic_ai.profiles import ModelProfile
|
|
12
12
|
from pydantic_ai.profiles.moonshotai import moonshotai_model_profile
|
|
13
13
|
from pydantic_ai.profiles.openai import (
|
|
14
14
|
OpenAIJsonSchemaTransformer,
|
pydantic_ai/providers/ollama.py
CHANGED
|
@@ -5,12 +5,13 @@ import os
|
|
|
5
5
|
import httpx
|
|
6
6
|
from openai import AsyncOpenAI
|
|
7
7
|
|
|
8
|
+
from pydantic_ai import ModelProfile
|
|
8
9
|
from pydantic_ai.exceptions import UserError
|
|
9
10
|
from pydantic_ai.models import cached_async_http_client
|
|
10
|
-
from pydantic_ai.profiles import ModelProfile
|
|
11
11
|
from pydantic_ai.profiles.cohere import cohere_model_profile
|
|
12
12
|
from pydantic_ai.profiles.deepseek import deepseek_model_profile
|
|
13
13
|
from pydantic_ai.profiles.google import google_model_profile
|
|
14
|
+
from pydantic_ai.profiles.harmony import harmony_model_profile
|
|
14
15
|
from pydantic_ai.profiles.meta import meta_model_profile
|
|
15
16
|
from pydantic_ai.profiles.mistral import mistral_model_profile
|
|
16
17
|
from pydantic_ai.profiles.openai import OpenAIJsonSchemaTransformer, OpenAIModelProfile
|
|
@@ -50,6 +51,7 @@ class OllamaProvider(Provider[AsyncOpenAI]):
|
|
|
50
51
|
'deepseek': deepseek_model_profile,
|
|
51
52
|
'mistral': mistral_model_profile,
|
|
52
53
|
'command': cohere_model_profile,
|
|
54
|
+
'gpt-oss': harmony_model_profile,
|
|
53
55
|
}
|
|
54
56
|
|
|
55
57
|
profile = None
|
pydantic_ai/providers/openai.py
CHANGED
|
@@ -5,8 +5,8 @@ from typing import overload
|
|
|
5
5
|
|
|
6
6
|
import httpx
|
|
7
7
|
|
|
8
|
+
from pydantic_ai import ModelProfile
|
|
8
9
|
from pydantic_ai.models import cached_async_http_client
|
|
9
|
-
from pydantic_ai.profiles import ModelProfile
|
|
10
10
|
from pydantic_ai.profiles.openai import openai_model_profile
|
|
11
11
|
from pydantic_ai.providers import Provider
|
|
12
12
|
|
|
@@ -6,9 +6,9 @@ from typing import overload
|
|
|
6
6
|
import httpx
|
|
7
7
|
from openai import AsyncOpenAI
|
|
8
8
|
|
|
9
|
+
from pydantic_ai import ModelProfile
|
|
9
10
|
from pydantic_ai.exceptions import UserError
|
|
10
11
|
from pydantic_ai.models import cached_async_http_client
|
|
11
|
-
from pydantic_ai.profiles import ModelProfile
|
|
12
12
|
from pydantic_ai.profiles.amazon import amazon_model_profile
|
|
13
13
|
from pydantic_ai.profiles.anthropic import anthropic_model_profile
|
|
14
14
|
from pydantic_ai.profiles.cohere import cohere_model_profile
|
|
@@ -6,9 +6,9 @@ from typing import overload
|
|
|
6
6
|
import httpx
|
|
7
7
|
from openai import AsyncOpenAI
|
|
8
8
|
|
|
9
|
+
from pydantic_ai import ModelProfile
|
|
9
10
|
from pydantic_ai.exceptions import UserError
|
|
10
11
|
from pydantic_ai.models import cached_async_http_client
|
|
11
|
-
from pydantic_ai.profiles import ModelProfile
|
|
12
12
|
from pydantic_ai.profiles.deepseek import deepseek_model_profile
|
|
13
13
|
from pydantic_ai.profiles.google import google_model_profile
|
|
14
14
|
from pydantic_ai.profiles.meta import meta_model_profile
|
pydantic_ai/providers/vercel.py
CHANGED
|
@@ -5,9 +5,9 @@ from typing import overload
|
|
|
5
5
|
|
|
6
6
|
import httpx
|
|
7
7
|
|
|
8
|
+
from pydantic_ai import ModelProfile
|
|
8
9
|
from pydantic_ai.exceptions import UserError
|
|
9
10
|
from pydantic_ai.models import cached_async_http_client
|
|
10
|
-
from pydantic_ai.profiles import ModelProfile
|
|
11
11
|
from pydantic_ai.profiles.amazon import amazon_model_profile
|
|
12
12
|
from pydantic_ai.profiles.anthropic import anthropic_model_profile
|
|
13
13
|
from pydantic_ai.profiles.cohere import cohere_model_profile
|
pydantic_ai/toolsets/function.py
CHANGED
|
@@ -148,8 +148,7 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
|
|
|
148
148
|
|
|
149
149
|
Example:
|
|
150
150
|
```python
|
|
151
|
-
from pydantic_ai import Agent, RunContext
|
|
152
|
-
from pydantic_ai.toolsets.function import FunctionToolset
|
|
151
|
+
from pydantic_ai import Agent, FunctionToolset, RunContext
|
|
153
152
|
|
|
154
153
|
toolset = FunctionToolset()
|
|
155
154
|
|