pydantic-ai-slim 1.0.11__py3-none-any.whl → 1.0.13__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.


This version of pydantic-ai-slim might be problematic.

Files changed (80)
  1. pydantic_ai/__init__.py +134 -4
  2. pydantic_ai/_a2a.py +1 -1
  3. pydantic_ai/_agent_graph.py +4 -0
  4. pydantic_ai/_instrumentation.py +95 -0
  5. pydantic_ai/{profiles/_json_schema.py → _json_schema.py} +5 -3
  6. pydantic_ai/_output.py +26 -12
  7. pydantic_ai/_run_context.py +4 -0
  8. pydantic_ai/_thinking_part.py +1 -1
  9. pydantic_ai/_tool_manager.py +15 -7
  10. pydantic_ai/_utils.py +24 -7
  11. pydantic_ai/agent/__init__.py +68 -36
  12. pydantic_ai/agent/abstract.py +12 -1
  13. pydantic_ai/agent/wrapper.py +11 -3
  14. pydantic_ai/builtin_tools.py +20 -1
  15. pydantic_ai/common_tools/duckduckgo.py +2 -2
  16. pydantic_ai/common_tools/tavily.py +2 -2
  17. pydantic_ai/direct.py +6 -6
  18. pydantic_ai/durable_exec/dbos/_agent.py +12 -3
  19. pydantic_ai/durable_exec/dbos/_mcp_server.py +1 -2
  20. pydantic_ai/durable_exec/dbos/_model.py +2 -2
  21. pydantic_ai/durable_exec/temporal/_agent.py +13 -4
  22. pydantic_ai/durable_exec/temporal/_function_toolset.py +1 -1
  23. pydantic_ai/durable_exec/temporal/_mcp_server.py +1 -1
  24. pydantic_ai/durable_exec/temporal/_model.py +3 -3
  25. pydantic_ai/durable_exec/temporal/_toolset.py +1 -3
  26. pydantic_ai/ext/aci.py +1 -1
  27. pydantic_ai/ext/langchain.py +1 -1
  28. pydantic_ai/mcp.py +32 -8
  29. pydantic_ai/messages.py +14 -11
  30. pydantic_ai/models/__init__.py +19 -2
  31. pydantic_ai/models/anthropic.py +29 -14
  32. pydantic_ai/models/bedrock.py +14 -5
  33. pydantic_ai/models/cohere.py +4 -0
  34. pydantic_ai/models/fallback.py +2 -9
  35. pydantic_ai/models/function.py +8 -0
  36. pydantic_ai/models/gemini.py +8 -0
  37. pydantic_ai/models/google.py +14 -2
  38. pydantic_ai/models/groq.py +8 -0
  39. pydantic_ai/models/huggingface.py +8 -2
  40. pydantic_ai/models/instrumented.py +16 -6
  41. pydantic_ai/models/mcp_sampling.py +2 -0
  42. pydantic_ai/models/mistral.py +8 -0
  43. pydantic_ai/models/openai.py +95 -29
  44. pydantic_ai/models/test.py +8 -0
  45. pydantic_ai/models/wrapper.py +7 -0
  46. pydantic_ai/output.py +11 -1
  47. pydantic_ai/profiles/__init__.py +1 -1
  48. pydantic_ai/profiles/google.py +1 -1
  49. pydantic_ai/profiles/openai.py +1 -1
  50. pydantic_ai/providers/__init__.py +1 -1
  51. pydantic_ai/providers/anthropic.py +1 -1
  52. pydantic_ai/providers/azure.py +1 -1
  53. pydantic_ai/providers/bedrock.py +1 -1
  54. pydantic_ai/providers/cerebras.py +1 -1
  55. pydantic_ai/providers/cohere.py +1 -1
  56. pydantic_ai/providers/deepseek.py +1 -1
  57. pydantic_ai/providers/fireworks.py +1 -1
  58. pydantic_ai/providers/github.py +1 -1
  59. pydantic_ai/providers/google.py +1 -1
  60. pydantic_ai/providers/google_gla.py +1 -1
  61. pydantic_ai/providers/google_vertex.py +1 -1
  62. pydantic_ai/providers/grok.py +1 -1
  63. pydantic_ai/providers/groq.py +1 -1
  64. pydantic_ai/providers/heroku.py +1 -1
  65. pydantic_ai/providers/huggingface.py +1 -1
  66. pydantic_ai/providers/litellm.py +1 -1
  67. pydantic_ai/providers/mistral.py +1 -1
  68. pydantic_ai/providers/moonshotai.py +1 -1
  69. pydantic_ai/providers/ollama.py +1 -1
  70. pydantic_ai/providers/openai.py +1 -1
  71. pydantic_ai/providers/openrouter.py +1 -1
  72. pydantic_ai/providers/together.py +1 -1
  73. pydantic_ai/providers/vercel.py +1 -1
  74. pydantic_ai/toolsets/function.py +1 -2
  75. {pydantic_ai_slim-1.0.11.dist-info → pydantic_ai_slim-1.0.13.dist-info}/METADATA +3 -3
  76. pydantic_ai_slim-1.0.13.dist-info/RECORD +128 -0
  77. pydantic_ai_slim-1.0.11.dist-info/RECORD +0 -127
  78. {pydantic_ai_slim-1.0.11.dist-info → pydantic_ai_slim-1.0.13.dist-info}/WHEEL +0 -0
  79. {pydantic_ai_slim-1.0.11.dist-info → pydantic_ai_slim-1.0.13.dist-info}/entry_points.txt +0 -0
  80. {pydantic_ai_slim-1.0.11.dist-info → pydantic_ai_slim-1.0.13.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/models/openai.py CHANGED
@@ -393,6 +393,10 @@ class OpenAIChatModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._completions_create(
             messages, False, cast(OpenAIChatModelSettings, model_settings or {}), model_request_parameters
         )
@@ -408,6 +412,10 @@ class OpenAIChatModel(Model):
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._completions_create(
             messages, True, cast(OpenAIChatModelSettings, model_settings or {}), model_request_parameters
         )
@@ -519,6 +527,10 @@ class OpenAIChatModel(Model):
         timestamp = _now_utc()
         response.created = int(timestamp.timestamp())

+        # Workaround for local Ollama which sometimes returns a `None` finish reason.
+        if response.choices and (choice := response.choices[0]) and choice.finish_reason is None:  # pyright: ignore[reportUnnecessaryComparison]
+            choice.finish_reason = 'stop'
+
         try:
             response = chat.ChatCompletion.model_validate(response.model_dump())
         except ValidationError as e:
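As an aside on the Ollama workaround above, here is a minimal sketch of the same normalization applied to a raw chat-completion payload before validating it with the OpenAI SDK's Pydantic model; the payload values are invented for illustration and are not part of this diff:

```python
# Illustrative only: patch a missing finish reason before validation,
# mirroring the workaround added in the hunk above.
from openai.types.chat import ChatCompletion

raw = {
    'id': 'chatcmpl-123',
    'object': 'chat.completion',
    'created': 1727000000,
    'model': 'llama3.2',  # e.g. a model served by a local Ollama instance
    'choices': [
        {
            'index': 0,
            'message': {'role': 'assistant', 'content': 'Hello!'},
            'finish_reason': None,  # local Ollama sometimes leaves this unset
        }
    ],
}

# Default a missing finish reason to 'stop' so validation succeeds.
if raw['choices'] and raw['choices'][0]['finish_reason'] is None:
    raw['choices'][0]['finish_reason'] = 'stop'

completion = ChatCompletion.model_validate(raw)
print(completion.choices[0].finish_reason)  # -> stop
```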
@@ -746,8 +758,7 @@ class OpenAIChatModel(Model):
             else:
                 assert_never(part)

-    @staticmethod
-    async def _map_user_prompt(part: UserPromptPart) -> chat.ChatCompletionUserMessageParam:
+    async def _map_user_prompt(self, part: UserPromptPart) -> chat.ChatCompletionUserMessageParam:  # noqa: C901
         content: str | list[ChatCompletionContentPartParam]
         if isinstance(part.content, str):
             content = part.content
@@ -762,28 +773,40 @@ class OpenAIChatModel(Model):
                         image_url['detail'] = metadata.get('detail', 'auto')
                     content.append(ChatCompletionContentPartImageParam(image_url=image_url, type='image_url'))
                 elif isinstance(item, BinaryContent):
-                    base64_encoded = base64.b64encode(item.data).decode('utf-8')
-                    if item.is_image:
-                        image_url: ImageURL = {'url': f'data:{item.media_type};base64,{base64_encoded}'}
-                        if metadata := item.vendor_metadata:
-                            image_url['detail'] = metadata.get('detail', 'auto')
-                        content.append(ChatCompletionContentPartImageParam(image_url=image_url, type='image_url'))
-                    elif item.is_audio:
-                        assert item.format in ('wav', 'mp3')
-                        audio = InputAudio(data=base64_encoded, format=item.format)
-                        content.append(ChatCompletionContentPartInputAudioParam(input_audio=audio, type='input_audio'))
-                    elif item.is_document:
+                    if self._is_text_like_media_type(item.media_type):
+                        # Inline text-like binary content as a text block
                         content.append(
-                            File(
-                                file=FileFile(
-                                    file_data=f'data:{item.media_type};base64,{base64_encoded}',
-                                    filename=f'filename.{item.format}',
-                                ),
-                                type='file',
+                            self._inline_text_file_part(
+                                item.data.decode('utf-8'),
+                                media_type=item.media_type,
+                                identifier=item.identifier,
                             )
                         )
-                    else:  # pragma: no cover
-                        raise RuntimeError(f'Unsupported binary content type: {item.media_type}')
+                    else:
+                        base64_encoded = base64.b64encode(item.data).decode('utf-8')
+                        if item.is_image:
+                            image_url: ImageURL = {'url': f'data:{item.media_type};base64,{base64_encoded}'}
+                            if metadata := item.vendor_metadata:
+                                image_url['detail'] = metadata.get('detail', 'auto')
+                            content.append(ChatCompletionContentPartImageParam(image_url=image_url, type='image_url'))
+                        elif item.is_audio:
+                            assert item.format in ('wav', 'mp3')
+                            audio = InputAudio(data=base64_encoded, format=item.format)
+                            content.append(
+                                ChatCompletionContentPartInputAudioParam(input_audio=audio, type='input_audio')
+                            )
+                        elif item.is_document:
+                            content.append(
+                                File(
+                                    file=FileFile(
+                                        file_data=f'data:{item.media_type};base64,{base64_encoded}',
+                                        filename=f'filename.{item.format}',
+                                    ),
+                                    type='file',
+                                )
+                            )
+                        else:  # pragma: no cover
+                            raise RuntimeError(f'Unsupported binary content type: {item.media_type}')
                 elif isinstance(item, AudioUrl):
                     downloaded_item = await download_item(item, data_format='base64', type_format='extension')
                     assert downloaded_item['data_type'] in (
@@ -793,20 +816,54 @@ class OpenAIChatModel(Model):
                     audio = InputAudio(data=downloaded_item['data'], format=downloaded_item['data_type'])
                     content.append(ChatCompletionContentPartInputAudioParam(input_audio=audio, type='input_audio'))
                 elif isinstance(item, DocumentUrl):
-                    downloaded_item = await download_item(item, data_format='base64_uri', type_format='extension')
-                    file = File(
-                        file=FileFile(
-                            file_data=downloaded_item['data'], filename=f'filename.{downloaded_item["data_type"]}'
-                        ),
-                        type='file',
-                    )
-                    content.append(file)
+                    if self._is_text_like_media_type(item.media_type):
+                        downloaded_text = await download_item(item, data_format='text')
+                        content.append(
+                            self._inline_text_file_part(
+                                downloaded_text['data'],
+                                media_type=item.media_type,
+                                identifier=item.identifier,
+                            )
+                        )
+                    else:
+                        downloaded_item = await download_item(item, data_format='base64_uri', type_format='extension')
+                        content.append(
+                            File(
+                                file=FileFile(
+                                    file_data=downloaded_item['data'],
+                                    filename=f'filename.{downloaded_item["data_type"]}',
+                                ),
+                                type='file',
+                            )
+                        )
                 elif isinstance(item, VideoUrl):  # pragma: no cover
                     raise NotImplementedError('VideoUrl is not supported for OpenAI')
                 else:
                     assert_never(item)
         return chat.ChatCompletionUserMessageParam(role='user', content=content)

+    @staticmethod
+    def _is_text_like_media_type(media_type: str) -> bool:
+        return (
+            media_type.startswith('text/')
+            or media_type == 'application/json'
+            or media_type.endswith('+json')
+            or media_type == 'application/xml'
+            or media_type.endswith('+xml')
+            or media_type in ('application/x-yaml', 'application/yaml')
+        )
+
+    @staticmethod
+    def _inline_text_file_part(text: str, *, media_type: str, identifier: str) -> ChatCompletionContentPartTextParam:
+        text = '\n'.join(
+            [
+                f'-----BEGIN FILE id="{identifier}" type="{media_type}"-----',
+                text,
+                f'-----END FILE id="{identifier}"-----',
+            ]
+        )
+        return ChatCompletionContentPartTextParam(text=text, type='text')
+

 @deprecated(
     '`OpenAIModel` was renamed to `OpenAIChatModel` to clearly distinguish it from `OpenAIResponsesModel` which '
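The two helpers added above inline text-like attachments (text/*, JSON, XML, YAML) as delimited text blocks instead of base64 file parts. A standalone sketch of the resulting wrapper format, copied from the logic above for illustration (it returns a plain dict rather than the typed `ChatCompletionContentPartTextParam`):

```python
def inline_text_file_part(text: str, *, media_type: str, identifier: str) -> dict:
    """Wrap text content in BEGIN/END FILE markers, as the new helper above does."""
    wrapped = '\n'.join(
        [
            f'-----BEGIN FILE id="{identifier}" type="{media_type}"-----',
            text,
            f'-----END FILE id="{identifier}"-----',
        ]
    )
    return {'text': wrapped, 'type': 'text'}


part = inline_text_file_part('{"a": 1}', media_type='application/json', identifier='doc1')
print(part['text'])
# -----BEGIN FILE id="doc1" type="application/json"-----
# {"a": 1}
# -----END FILE id="doc1"-----
```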
@@ -877,6 +934,10 @@ class OpenAIResponsesModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._responses_create(
             messages, False, cast(OpenAIResponsesModelSettings, model_settings or {}), model_request_parameters
         )
@@ -891,6 +952,10 @@ class OpenAIResponsesModel(Model):
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._responses_create(
             messages, True, cast(OpenAIResponsesModelSettings, model_settings or {}), model_request_parameters
         )
@@ -1467,6 +1532,7 @@ class OpenAIStreamedResponse(StreamedResponse):

     async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
         async for chunk in self._response:
+            print(chunk)
             self._usage += _map_usage(chunk)

             if chunk.id:  # pragma: no branch
pydantic_ai/models/test.py CHANGED
@@ -110,6 +110,10 @@ class TestModel(Model):
         model_settings: ModelSettings | None,
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         self.last_model_request_parameters = model_request_parameters
         model_response = self._request(messages, model_settings, model_request_parameters)
         model_response.usage = _estimate_usage([*messages, model_response])
@@ -123,6 +127,10 @@
         model_request_parameters: ModelRequestParameters,
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         self.last_model_request_parameters = model_request_parameters

         model_response = self._request(messages, model_settings, model_request_parameters)
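Since `TestModel` also records the parameters it receives, the new hook is easy to observe in a test; a small usage sketch (the prompts are invented for illustration):

```python
from pydantic_ai import Agent
from pydantic_ai.models.test import TestModel

# TestModel returns canned responses without calling a real LLM provider.
model = TestModel()
agent = Agent(model, system_prompt='Be concise.')

result = agent.run_sync('What is 2 + 2?')
print(result.output)

# The parameters that passed through `prepare_request` are kept for inspection.
print(model.last_model_request_parameters)
```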
pydantic_ai/models/wrapper.py CHANGED
@@ -46,6 +46,13 @@ class WrapperModel(Model):
     def customize_request_parameters(self, model_request_parameters: ModelRequestParameters) -> ModelRequestParameters:
         return self.wrapped.customize_request_parameters(model_request_parameters)

+    def prepare_request(
+        self,
+        model_settings: ModelSettings | None,
+        model_request_parameters: ModelRequestParameters,
+    ) -> tuple[ModelSettings | None, ModelRequestParameters]:
+        return self.wrapped.prepare_request(model_settings, model_request_parameters)
+
     @property
     def model_name(self) -> str:
         return self.wrapped.model_name
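`prepare_request` is now part of the model interface: concrete models call it at the top of `request`/`request_stream` (see the OpenAI and TestModel hunks above), and `WrapperModel` delegates it to the wrapped model. A hedged sketch of how a custom wrapper might build on this hook, assuming only the signature shown above; `DefaultTemperature` is an invented example, not part of the release:

```python
from __future__ import annotations

from pydantic_ai.models import ModelRequestParameters
from pydantic_ai.models.wrapper import WrapperModel
from pydantic_ai.settings import ModelSettings


class DefaultTemperature(WrapperModel):
    """Illustrative wrapper that injects a default temperature before each request."""

    def prepare_request(
        self,
        model_settings: ModelSettings | None,
        model_request_parameters: ModelRequestParameters,
    ) -> tuple[ModelSettings | None, ModelRequestParameters]:
        merged: ModelSettings = {'temperature': 0.2, **(model_settings or {})}
        # Delegate to the wrapped model, as WrapperModel.prepare_request does.
        return super().prepare_request(merged, model_request_parameters)
```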
pydantic_ai/output.py CHANGED
@@ -9,7 +9,8 @@ from pydantic.json_schema import JsonSchemaValue
 from pydantic_core import core_schema
 from typing_extensions import TypeAliasType, TypeVar, deprecated

-from . import _utils
+from . import _utils, exceptions
+from ._json_schema import InlineDefsJsonSchemaTransformer
 from .messages import ToolCallPart
 from .tools import DeferredToolRequests, ObjectJsonSchema, RunContext, ToolDefinition

@@ -311,6 +312,15 @@ def StructuredDict(
     """
     json_schema = _utils.check_object_json_schema(json_schema)

+    # Pydantic `TypeAdapter` fails when `object.__get_pydantic_json_schema__` has `$defs`, so we inline them
+    # See https://github.com/pydantic/pydantic/issues/12145
+    if '$defs' in json_schema:
+        json_schema = InlineDefsJsonSchemaTransformer(json_schema).walk()
+        if '$defs' in json_schema:
+            raise exceptions.UserError(
+                '`StructuredDict` does not currently support recursive `$ref`s and `$defs`. See https://github.com/pydantic/pydantic/issues/12145 for more information.'
+            )
+
     if name:
         json_schema['title'] = name

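For context, `StructuredDict` builds an output type from a raw JSON schema, and schemas generated from nested models often carry `$defs`; after this change those are inlined, while recursive `$ref`s raise `UserError`. A hedged usage sketch (the schema and agent setup are invented for illustration; only the `json_schema` and `name` arguments are visible in the hunk above):

```python
from pydantic_ai import Agent
from pydantic_ai.output import StructuredDict

person_schema = {
    'type': 'object',
    'title': 'Person',
    'properties': {
        'name': {'type': 'string'},
        'address': {'$ref': '#/$defs/Address'},  # resolved by inlining the $defs entry
    },
    'required': ['name', 'address'],
    '$defs': {
        'Address': {
            'type': 'object',
            'properties': {'city': {'type': 'string'}},
            'required': ['city'],
        }
    },
}

# The non-recursive $ref above is inlined; a schema whose $defs reference
# themselves would raise UserError instead.
PersonDict = StructuredDict(person_schema, name='Person')
agent = Agent('openai:gpt-4o', output_type=PersonDict)
```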
pydantic_ai/profiles/__init__.py CHANGED
@@ -6,8 +6,8 @@ from textwrap import dedent

 from typing_extensions import Self

+from .._json_schema import InlineDefsJsonSchemaTransformer, JsonSchemaTransformer
 from ..output import StructuredOutputMode
-from ._json_schema import InlineDefsJsonSchemaTransformer, JsonSchemaTransformer

 __all__ = [
     'ModelProfile',
pydantic_ai/profiles/google.py CHANGED
@@ -4,8 +4,8 @@ import warnings

 from pydantic_ai.exceptions import UserError

+from .._json_schema import JsonSchema, JsonSchemaTransformer
 from . import ModelProfile
-from ._json_schema import JsonSchema, JsonSchemaTransformer


 def google_model_profile(model_name: str) -> ModelProfile | None:
pydantic_ai/profiles/openai.py CHANGED
@@ -6,8 +6,8 @@ from collections.abc import Sequence
 from dataclasses import dataclass
 from typing import Any, Literal

+from .._json_schema import JsonSchema, JsonSchemaTransformer
 from . import ModelProfile
-from ._json_schema import JsonSchema, JsonSchemaTransformer

 OpenAISystemPromptRole = Literal['system', 'developer', 'user']

pydantic_ai/providers/__init__.py CHANGED
@@ -8,7 +8,7 @@ from __future__ import annotations as _annotations
 from abc import ABC, abstractmethod
 from typing import Any, Generic, TypeVar

-from pydantic_ai.profiles import ModelProfile
+from pydantic_ai import ModelProfile

 InterfaceClient = TypeVar('InterfaceClient')

pydantic_ai/providers/anthropic.py CHANGED
@@ -5,9 +5,9 @@ from typing import TypeAlias, overload

 import httpx

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.anthropic import anthropic_model_profile
 from pydantic_ai.providers import Provider

pydantic_ai/providers/azure.py CHANGED
@@ -6,9 +6,9 @@ from typing import overload
 import httpx
 from openai import AsyncOpenAI

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.cohere import cohere_model_profile
 from pydantic_ai.profiles.deepseek import deepseek_model_profile
 from pydantic_ai.profiles.grok import grok_model_profile
pydantic_ai/providers/bedrock.py CHANGED
@@ -6,8 +6,8 @@ from collections.abc import Callable
 from dataclasses import dataclass
 from typing import Literal, overload

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.amazon import amazon_model_profile
 from pydantic_ai.profiles.anthropic import anthropic_model_profile
 from pydantic_ai.profiles.cohere import cohere_model_profile
pydantic_ai/providers/cerebras.py CHANGED
@@ -5,9 +5,9 @@ from typing import overload

 import httpx

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.harmony import harmony_model_profile
 from pydantic_ai.profiles.meta import meta_model_profile
 from pydantic_ai.profiles.openai import OpenAIJsonSchemaTransformer, OpenAIModelProfile
pydantic_ai/providers/cohere.py CHANGED
@@ -4,9 +4,9 @@ import os

 import httpx

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.cohere import cohere_model_profile
 from pydantic_ai.providers import Provider

pydantic_ai/providers/deepseek.py CHANGED
@@ -6,9 +6,9 @@ from typing import overload
 import httpx
 from openai import AsyncOpenAI

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.deepseek import deepseek_model_profile
 from pydantic_ai.profiles.openai import OpenAIJsonSchemaTransformer, OpenAIModelProfile
 from pydantic_ai.providers import Provider
pydantic_ai/providers/fireworks.py CHANGED
@@ -6,9 +6,9 @@ from typing import overload
 import httpx
 from openai import AsyncOpenAI

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.deepseek import deepseek_model_profile
 from pydantic_ai.profiles.google import google_model_profile
 from pydantic_ai.profiles.meta import meta_model_profile
pydantic_ai/providers/github.py CHANGED
@@ -5,9 +5,9 @@ from typing import overload

 import httpx

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.cohere import cohere_model_profile
 from pydantic_ai.profiles.deepseek import deepseek_model_profile
 from pydantic_ai.profiles.grok import grok_model_profile
pydantic_ai/providers/google.py CHANGED
@@ -5,9 +5,9 @@ from typing import Literal, overload

 import httpx

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import get_user_agent
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.google import google_model_profile
 from pydantic_ai.providers import Provider

pydantic_ai/providers/google_gla.py CHANGED
@@ -5,9 +5,9 @@ import os
 import httpx
 from typing_extensions import deprecated

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.google import google_model_profile
 from pydantic_ai.providers import Provider

pydantic_ai/providers/google_vertex.py CHANGED
@@ -10,9 +10,9 @@ import anyio.to_thread
 import httpx
 from typing_extensions import deprecated

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.google import google_model_profile
 from pydantic_ai.providers import Provider

pydantic_ai/providers/grok.py CHANGED
@@ -6,9 +6,9 @@ from typing import Literal, overload
 import httpx
 from openai import AsyncOpenAI

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.grok import grok_model_profile
 from pydantic_ai.profiles.openai import OpenAIJsonSchemaTransformer, OpenAIModelProfile
 from pydantic_ai.providers import Provider
pydantic_ai/providers/groq.py CHANGED
@@ -5,9 +5,9 @@ from typing import overload

 import httpx

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.deepseek import deepseek_model_profile
 from pydantic_ai.profiles.google import google_model_profile
 from pydantic_ai.profiles.groq import groq_model_profile
pydantic_ai/providers/heroku.py CHANGED
@@ -6,9 +6,9 @@ from typing import overload
 import httpx
 from openai import AsyncOpenAI

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.openai import OpenAIJsonSchemaTransformer, OpenAIModelProfile
 from pydantic_ai.providers import Provider

pydantic_ai/providers/huggingface.py CHANGED
@@ -5,8 +5,8 @@ from typing import overload

 from httpx import AsyncClient

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.deepseek import deepseek_model_profile
 from pydantic_ai.profiles.google import google_model_profile
 from pydantic_ai.profiles.meta import meta_model_profile
pydantic_ai/providers/litellm.py CHANGED
@@ -5,8 +5,8 @@ from typing import overload
 from httpx import AsyncClient as AsyncHTTPClient
 from openai import AsyncOpenAI

+from pydantic_ai import ModelProfile
 from pydantic_ai.models import cached_async_http_client
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.amazon import amazon_model_profile
 from pydantic_ai.profiles.anthropic import anthropic_model_profile
 from pydantic_ai.profiles.cohere import cohere_model_profile
pydantic_ai/providers/mistral.py CHANGED
@@ -5,9 +5,9 @@ from typing import overload

 import httpx

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.mistral import mistral_model_profile
 from pydantic_ai.providers import Provider

pydantic_ai/providers/moonshotai.py CHANGED
@@ -6,9 +6,9 @@ from typing import Literal, overload
 import httpx
 from openai import AsyncOpenAI

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.moonshotai import moonshotai_model_profile
 from pydantic_ai.profiles.openai import (
     OpenAIJsonSchemaTransformer,
pydantic_ai/providers/ollama.py CHANGED
@@ -5,9 +5,9 @@ import os
 import httpx
 from openai import AsyncOpenAI

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.cohere import cohere_model_profile
 from pydantic_ai.profiles.deepseek import deepseek_model_profile
 from pydantic_ai.profiles.google import google_model_profile
pydantic_ai/providers/openai.py CHANGED
@@ -5,8 +5,8 @@ from typing import overload

 import httpx

+from pydantic_ai import ModelProfile
 from pydantic_ai.models import cached_async_http_client
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.openai import openai_model_profile
 from pydantic_ai.providers import Provider

pydantic_ai/providers/openrouter.py CHANGED
@@ -6,9 +6,9 @@ from typing import overload
 import httpx
 from openai import AsyncOpenAI

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.amazon import amazon_model_profile
 from pydantic_ai.profiles.anthropic import anthropic_model_profile
 from pydantic_ai.profiles.cohere import cohere_model_profile
pydantic_ai/providers/together.py CHANGED
@@ -6,9 +6,9 @@ from typing import overload
 import httpx
 from openai import AsyncOpenAI

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.deepseek import deepseek_model_profile
 from pydantic_ai.profiles.google import google_model_profile
 from pydantic_ai.profiles.meta import meta_model_profile
pydantic_ai/providers/vercel.py CHANGED
@@ -5,9 +5,9 @@ from typing import overload

 import httpx

+from pydantic_ai import ModelProfile
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
-from pydantic_ai.profiles import ModelProfile
 from pydantic_ai.profiles.amazon import amazon_model_profile
 from pydantic_ai.profiles.anthropic import anthropic_model_profile
 from pydantic_ai.profiles.cohere import cohere_model_profile
pydantic_ai/toolsets/function.py CHANGED
@@ -148,8 +148,7 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):

        Example:
        ```python
-        from pydantic_ai import Agent, RunContext
-        from pydantic_ai.toolsets.function import FunctionToolset
+        from pydantic_ai import Agent, FunctionToolset, RunContext

        toolset = FunctionToolset()

{pydantic_ai_slim-1.0.11.dist-info → pydantic_ai_slim-1.0.13.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 1.0.11
+Version: 1.0.13
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -33,7 +33,7 @@ Requires-Dist: genai-prices>=0.0.28
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==1.0.11
+Requires-Dist: pydantic-graph==1.0.13
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -57,7 +57,7 @@ Requires-Dist: dbos>=1.14.0; extra == 'dbos'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==1.0.11; extra == 'evals'
+Requires-Dist: pydantic-evals==1.0.13; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq