pydantic-ai-slim 0.4.1.tar.gz → 0.4.2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/PKG-INFO +4 -4
  2. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/__init__.py +2 -1
  3. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/_a2a.py +3 -4
  4. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/_output.py +10 -6
  5. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/_utils.py +6 -1
  6. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/agent.py +12 -10
  7. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/__init__.py +21 -0
  8. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/anthropic.py +4 -1
  9. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/bedrock.py +4 -1
  10. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/cohere.py +4 -1
  11. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/fallback.py +1 -0
  12. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/function.py +13 -2
  13. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/gemini.py +11 -8
  14. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/google.py +4 -1
  15. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/groq.py +4 -1
  16. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/instrumented.py +11 -11
  17. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/mistral.py +4 -1
  18. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/openai.py +14 -2
  19. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/test.py +22 -1
  20. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/wrapper.py +6 -0
  21. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/output.py +65 -1
  22. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pyproject.toml +1 -1
  23. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/.gitignore +0 -0
  24. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/LICENSE +0 -0
  25. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/README.md +0 -0
  26. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/__main__.py +0 -0
  27. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/_agent_graph.py +0 -0
  28. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/_cli.py +0 -0
  29. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/_function_schema.py +0 -0
  30. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/_griffe.py +0 -0
  31. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/_mcp.py +0 -0
  32. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/_parts_manager.py +0 -0
  33. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/_run_context.py +0 -0
  34. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/_system_prompt.py +0 -0
  35. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/_thinking_part.py +0 -0
  36. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/common_tools/__init__.py +0 -0
  37. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  38. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/common_tools/tavily.py +0 -0
  39. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/direct.py +0 -0
  40. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/exceptions.py +0 -0
  41. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/ext/__init__.py +0 -0
  42. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/ext/aci.py +0 -0
  43. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/ext/langchain.py +0 -0
  44. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/format_as_xml.py +0 -0
  45. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/format_prompt.py +0 -0
  46. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/mcp.py +0 -0
  47. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/messages.py +0 -0
  48. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/mcp_sampling.py +0 -0
  49. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/__init__.py +0 -0
  50. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/_json_schema.py +0 -0
  51. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/amazon.py +0 -0
  52. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/anthropic.py +0 -0
  53. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/cohere.py +0 -0
  54. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/deepseek.py +0 -0
  55. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/google.py +0 -0
  56. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/grok.py +0 -0
  57. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/meta.py +0 -0
  58. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/mistral.py +0 -0
  59. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/openai.py +0 -0
  60. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/qwen.py +0 -0
  61. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/__init__.py +0 -0
  62. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/anthropic.py +0 -0
  63. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/azure.py +0 -0
  64. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/bedrock.py +0 -0
  65. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/cohere.py +0 -0
  66. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/deepseek.py +0 -0
  67. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/fireworks.py +0 -0
  68. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/github.py +0 -0
  69. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/google.py +0 -0
  70. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/google_gla.py +0 -0
  71. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/google_vertex.py +0 -0
  72. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/grok.py +0 -0
  73. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/groq.py +0 -0
  74. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/heroku.py +0 -0
  75. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/mistral.py +0 -0
  76. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/openai.py +0 -0
  77. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/openrouter.py +0 -0
  78. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/together.py +0 -0
  79. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/py.typed +0 -0
  80. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/result.py +0 -0
  81. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/settings.py +0 -0
  82. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/tools.py +0 -0
  83. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.2}/pydantic_ai/usage.py +0 -0
--- pydantic_ai_slim-0.4.1/PKG-INFO
+++ pydantic_ai_slim-0.4.2/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.4.1
+Version: 0.4.2
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
 License-Expression: MIT
@@ -30,11 +30,11 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.4.1
+Requires-Dist: pydantic-graph==0.4.2
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
-Requires-Dist: fasta2a==0.4.1; extra == 'a2a'
+Requires-Dist: fasta2a>=0.4.1; extra == 'a2a'
 Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.52.0; extra == 'anthropic'
 Provides-Extra: bedrock
@@ -48,7 +48,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.4.1; extra == 'evals'
+Requires-Dist: pydantic-evals==0.4.2; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.24.0; extra == 'google'
 Provides-Extra: groq
--- pydantic_ai_slim-0.4.1/pydantic_ai/__init__.py
+++ pydantic_ai_slim-0.4.2/pydantic_ai/__init__.py
@@ -12,7 +12,7 @@ from .exceptions import (
 )
 from .format_prompt import format_as_xml
 from .messages import AudioUrl, BinaryContent, DocumentUrl, ImageUrl, VideoUrl
-from .output import NativeOutput, PromptedOutput, TextOutput, ToolOutput
+from .output import NativeOutput, PromptedOutput, StructuredDict, TextOutput, ToolOutput
 from .tools import RunContext, Tool
 
 __all__ = (
@@ -46,6 +46,7 @@ __all__ = (
     'NativeOutput',
     'PromptedOutput',
     'TextOutput',
+    'StructuredDict',
     # format_prompt
     'format_as_xml',
 )
--- pydantic_ai_slim-0.4.1/pydantic_ai/_a2a.py
+++ pydantic_ai_slim-0.4.2/pydantic_ai/_a2a.py
@@ -33,10 +33,6 @@ from .agent import Agent, AgentDepsT, OutputDataT
 WorkerOutputT = TypeVar('WorkerOutputT')
 
 try:
-    from starlette.middleware import Middleware
-    from starlette.routing import Route
-    from starlette.types import ExceptionHandler, Lifespan
-
     from fasta2a.applications import FastA2A
     from fasta2a.broker import Broker, InMemoryBroker
     from fasta2a.schema import (
@@ -52,6 +48,9 @@ try:
     )
     from fasta2a.storage import InMemoryStorage, Storage
     from fasta2a.worker import Worker
+    from starlette.middleware import Middleware
+    from starlette.routing import Route
+    from starlette.types import ExceptionHandler, Lifespan
 except ImportError as _import_error:
     raise ImportError(
         'Please install the `fasta2a` package to use `Agent.to_a2a()` method, '
--- pydantic_ai_slim-0.4.1/pydantic_ai/_output.py
+++ pydantic_ai_slim-0.4.2/pydantic_ai/_output.py
@@ -264,10 +264,16 @@ class OutputSchema(BaseOutputSchema[OutputDataT], ABC):
 
             output = output.output
 
+        description = description or default_description
+        if strict is None:
+            strict = default_strict
+
+        processor = ObjectOutputProcessor(output=output, description=description, strict=strict)
+
         if name is None:
             name = default_name
             if multiple:
-                name += f'_{output.__name__}'
+                name += f'_{processor.object_def.name}'
 
             i = 1
             original_name = name
@@ -275,11 +281,6 @@ class OutputSchema(BaseOutputSchema[OutputDataT], ABC):
                 i += 1
                 name = f'{original_name}_{i}'
 
-        description = description or default_description
-        if strict is None:
-            strict = default_strict
-
-        processor = ObjectOutputProcessor(output=output, description=description, strict=strict)
 
         tools[name] = OutputTool(name=name, processor=processor, multiple=multiple)
     return tools
@@ -616,6 +617,9 @@ class ObjectOutputProcessor(BaseOutputProcessor[OutputDataT]):
             # including `response_data_typed_dict` as a title here doesn't add anything and could confuse the LLM
             json_schema.pop('title')
 
+        if name is None and (json_schema_title := json_schema.get('title', None)):
+            name = json_schema_title
+
         if json_schema_description := json_schema.pop('description', None):
             if description is None:
                 description = json_schema_description
--- pydantic_ai_slim-0.4.1/pydantic_ai/_utils.py
+++ pydantic_ai_slim-0.4.2/pydantic_ai/_utils.py
@@ -60,7 +60,12 @@ def is_model_like(type_: Any) -> bool:
     return (
        isinstance(type_, type)
        and not isinstance(type_, GenericAlias)
-       and (issubclass(type_, BaseModel) or is_dataclass(type_) or is_typeddict(type_))  # pyright: ignore[reportUnknownArgumentType]
+       and (
+           issubclass(type_, BaseModel)
+           or is_dataclass(type_)  # pyright: ignore[reportUnknownArgumentType]
+           or is_typeddict(type_)  # pyright: ignore[reportUnknownArgumentType]
+           or getattr(type_, '__is_model_like__', False)  # pyright: ignore[reportUnknownArgumentType]
+       )
     )
 
--- pydantic_ai_slim-0.4.1/pydantic_ai/agent.py
+++ pydantic_ai_slim-0.4.2/pydantic_ai/agent.py
@@ -57,14 +57,14 @@ ModelRequestNode = _agent_graph.ModelRequestNode
 UserPromptNode = _agent_graph.UserPromptNode
 
 if TYPE_CHECKING:
-    from starlette.middleware import Middleware
-    from starlette.routing import Route
-    from starlette.types import ExceptionHandler, Lifespan
-
     from fasta2a.applications import FastA2A
     from fasta2a.broker import Broker
     from fasta2a.schema import AgentProvider, Skill
     from fasta2a.storage import Storage
+    from starlette.middleware import Middleware
+    from starlette.routing import Route
+    from starlette.types import ExceptionHandler, Lifespan
+
     from pydantic_ai.mcp import MCPServer
 
 
@@ -500,7 +500,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
     @overload
     def iter(
         self,
-        user_prompt: str | Sequence[_messages.UserContent] | None,
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
         *,
         output_type: None = None,
         message_history: list[_messages.ModelMessage] | None = None,
@@ -516,7 +516,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
     @overload
     def iter(
         self,
-        user_prompt: str | Sequence[_messages.UserContent] | None,
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
         *,
         output_type: OutputSpec[RunOutputDataT],
         message_history: list[_messages.ModelMessage] | None = None,
@@ -533,7 +533,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
     @deprecated('`result_type` is deprecated, use `output_type` instead.')
     def iter(
         self,
-        user_prompt: str | Sequence[_messages.UserContent] | None,
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
         *,
         result_type: type[RunOutputDataT],
         message_history: list[_messages.ModelMessage] | None = None,
@@ -674,12 +674,14 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         # typecast reasonable, even though it is possible to violate it with otherwise-type-checked code.
         output_validators = cast(list[_output.OutputValidator[AgentDepsT, RunOutputDataT]], self._output_validators)
 
-        model_settings = merge_model_settings(self.model_settings, model_settings)
+        # Merge model settings in order of precedence: run > agent > model
+        merged_settings = merge_model_settings(model_used.settings, self.model_settings)
+        model_settings = merge_model_settings(merged_settings, model_settings)
         usage_limits = usage_limits or _usage.UsageLimits()
 
         if isinstance(model_used, InstrumentedModel):
-            instrumentation_settings = model_used.settings
-            tracer = model_used.settings.tracer
+            instrumentation_settings = model_used.instrumentation_settings
+            tracer = model_used.instrumentation_settings.tracer
         else:
             instrumentation_settings = None
             tracer = NoOpTracer()
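The reordered merge above is the user-visible change in this hunk: a model instance's own default settings are now honored, with agent-level settings overriding them and per-run settings overriding both. A minimal sketch of the resulting precedence (model name and values are illustrative only, and `OPENAI_API_KEY` is assumed to be set):

```python
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIModel
from pydantic_ai.settings import ModelSettings

# Model-level defaults (lowest precedence), enabled by the new `settings` parameter.
model = OpenAIModel('gpt-4o', settings=ModelSettings(temperature=1.0, max_tokens=100))

# Agent-level settings override the model's defaults.
agent = Agent(model, model_settings=ModelSettings(temperature=0.5))

# Run-level settings win, so this run uses temperature=0.1 and inherits max_tokens=100.
result = agent.run_sync('Say hello', model_settings=ModelSettings(temperature=0.1))
```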
--- pydantic_ai_slim-0.4.1/pydantic_ai/models/__init__.py
+++ pydantic_ai_slim-0.4.2/pydantic_ai/models/__init__.py
@@ -321,6 +321,27 @@ class Model(ABC):
     """Abstract class for a model."""
 
     _profile: ModelProfileSpec | None = None
+    _settings: ModelSettings | None = None
+
+    def __init__(
+        self,
+        *,
+        settings: ModelSettings | None = None,
+        profile: ModelProfileSpec | None = None,
+    ) -> None:
+        """Initialize the model with optional settings and profile.
+
+        Args:
+            settings: Model-specific settings that will be used as defaults for this model.
+            profile: The model profile to use.
+        """
+        self._settings = settings
+        self._profile = profile
+
+    @property
+    def settings(self) -> ModelSettings | None:
+        """Get the model settings."""
+        return self._settings
 
     @abstractmethod
     async def request(
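Every provider model in this release now routes through this base `__init__`, so default settings can be attached to any model instance and read back via the new `settings` property. A small sketch using `TestModel`, which gains a matching `__init__` later in this diff:

```python
from pydantic_ai.models.test import TestModel
from pydantic_ai.settings import ModelSettings

# ModelSettings is a TypedDict, so the property returns a plain dict.
model = TestModel(settings=ModelSettings(temperature=0.0))
print(model.settings)
#> {'temperature': 0.0}
```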
--- pydantic_ai_slim-0.4.1/pydantic_ai/models/anthropic.py
+++ pydantic_ai_slim-0.4.2/pydantic_ai/models/anthropic.py
@@ -127,6 +127,7 @@ class AnthropicModel(Model):
         *,
         provider: Literal['anthropic'] | Provider[AsyncAnthropic] = 'anthropic',
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize an Anthropic model.
 
@@ -136,13 +137,15 @@
             provider: The provider to use for the Anthropic API. Can be either the string 'anthropic' or an
                 instance of `Provider[AsyncAnthropic]`. If not provided, the other parameters will be used.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
+            settings: Default model settings for this model instance.
         """
         self._model_name = model_name
 
         if isinstance(provider, str):
             provider = infer_provider(provider)
         self.client = provider.client
-        self._profile = profile or provider.model_profile
+
+        super().__init__(settings=settings, profile=profile or provider.model_profile)
 
     @property
     def base_url(self) -> str:
--- pydantic_ai_slim-0.4.1/pydantic_ai/models/bedrock.py
+++ pydantic_ai_slim-0.4.2/pydantic_ai/models/bedrock.py
@@ -202,6 +202,7 @@ class BedrockConverseModel(Model):
         *,
         provider: Literal['bedrock'] | Provider[BaseClient] = 'bedrock',
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize a Bedrock model.
 
@@ -213,13 +214,15 @@
                 'bedrock' or an instance of `Provider[BaseClient]`. If not provided, a new provider will be
                 created using the other parameters.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
+            settings: Model-specific settings that will be used as defaults for this model.
         """
         self._model_name = model_name
 
         if isinstance(provider, str):
             provider = infer_provider(provider)
         self.client = cast('BedrockRuntimeClient', provider.client)
-        self._profile = profile or provider.model_profile
+
+        super().__init__(settings=settings, profile=profile or provider.model_profile)
 
     def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[ToolTypeDef]:
         tools = [self._map_tool_definition(r) for r in model_request_parameters.function_tools]
--- pydantic_ai_slim-0.4.1/pydantic_ai/models/cohere.py
+++ pydantic_ai_slim-0.4.2/pydantic_ai/models/cohere.py
@@ -111,6 +111,7 @@ class CohereModel(Model):
         *,
         provider: Literal['cohere'] | Provider[AsyncClientV2] = 'cohere',
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize an Cohere model.
 
@@ -121,13 +122,15 @@
                 'cohere' or an instance of `Provider[AsyncClientV2]`. If not provided, a new provider will be
                 created using the other parameters.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
+            settings: Model-specific settings that will be used as defaults for this model.
         """
         self._model_name = model_name
 
         if isinstance(provider, str):
             provider = infer_provider(provider)
         self.client = provider.client
-        self._profile = profile or provider.model_profile
+
+        super().__init__(settings=settings, profile=profile or provider.model_profile)
 
     @property
     def base_url(self) -> str:
--- pydantic_ai_slim-0.4.1/pydantic_ai/models/fallback.py
+++ pydantic_ai_slim-0.4.2/pydantic_ai/models/fallback.py
@@ -42,6 +42,7 @@ class FallbackModel(Model):
             fallback_models: The names or instances of the fallback models to use upon failure.
             fallback_on: A callable or tuple of exceptions that should trigger a fallback.
         """
+        super().__init__()
         self.models = [infer_model(default_model), *[infer_model(m) for m in fallback_models]]
 
         if isinstance(fallback_on, tuple):
--- pydantic_ai_slim-0.4.1/pydantic_ai/models/function.py
+++ pydantic_ai_slim-0.4.2/pydantic_ai/models/function.py
@@ -52,7 +52,12 @@ class FunctionModel(Model):
 
     @overload
     def __init__(
-        self, function: FunctionDef, *, model_name: str | None = None, profile: ModelProfileSpec | None = None
+        self,
+        function: FunctionDef,
+        *,
+        model_name: str | None = None,
+        profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ) -> None: ...
 
     @overload
@@ -62,6 +67,7 @@ class FunctionModel(Model):
         stream_function: StreamFunctionDef,
         model_name: str | None = None,
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ) -> None: ...
 
     @overload
@@ -72,6 +78,7 @@ class FunctionModel(Model):
         stream_function: StreamFunctionDef,
         model_name: str | None = None,
        profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ) -> None: ...
 
     def __init__(
@@ -81,6 +88,7 @@ class FunctionModel(Model):
         stream_function: StreamFunctionDef | None = None,
         model_name: str | None = None,
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize a `FunctionModel`.
 
@@ -91,16 +99,19 @@ class FunctionModel(Model):
             stream_function: The function to call for streamed requests.
             model_name: The name of the model. If not provided, a name is generated from the function names.
             profile: The model profile to use.
+            settings: Model-specific settings that will be used as defaults for this model.
         """
         if function is None and stream_function is None:
             raise TypeError('Either `function` or `stream_function` must be provided')
+
         self.function = function
         self.stream_function = stream_function
 
         function_name = self.function.__name__ if self.function is not None else ''
         stream_function_name = self.stream_function.__name__ if self.stream_function is not None else ''
         self._model_name = model_name or f'function:{function_name}:{stream_function_name}'
-        self._profile = profile
+
+        super().__init__(settings=settings, profile=profile)
 
     async def request(
         self,
--- pydantic_ai_slim-0.4.1/pydantic_ai/models/gemini.py
+++ pydantic_ai_slim-0.4.2/pydantic_ai/models/gemini.py
@@ -133,6 +133,7 @@ class GeminiModel(Model):
         *,
         provider: Literal['google-gla', 'google-vertex'] | Provider[httpx.AsyncClient] = 'google-gla',
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize a Gemini model.
 
@@ -142,6 +143,7 @@
                 'google-gla' or 'google-vertex' or an instance of `Provider[httpx.AsyncClient]`.
                 If not provided, a new provider will be created using the other parameters.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
+            settings: Default model settings for this model instance.
         """
         self._model_name = model_name
         self._provider = provider
@@ -151,7 +153,8 @@
         self._system = provider.name
         self.client = provider.client
         self._url = str(self.client.base_url)
-        self._profile = profile or provider.model_profile
+
+        super().__init__(settings=settings, profile=profile or provider.model_profile)
 
     @property
     def base_url(self) -> str:
@@ -921,10 +924,10 @@ def _ensure_decodeable(content: bytearray) -> bytearray:
 
     This is a temporary workaround until https://github.com/pydantic/pydantic-core/issues/1633 is resolved
     """
-    while True:
-        try:
-            content.decode()
-        except UnicodeDecodeError:
-            content = content[:-1]  # this will definitely succeed before we run out of bytes
-        else:
-            return content
+    try:
+        content.decode()
+    except UnicodeDecodeError as e:
+        # e.start marks the start of the invalid decoded bytes, so cut up to before the first invalid byte
+        return content[: e.start]
+    else:
+        return content
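The rewritten helper drops the byte-at-a-time loop: `UnicodeDecodeError.start` already points at the first invalid byte, so a single slice is enough. A standalone illustration of the same logic (plain stdlib, not pydantic-ai code):

```python
content = bytearray('hé'.encode()[:-1])  # a multi-byte character cut in half
try:
    content.decode()
except UnicodeDecodeError as e:
    content = content[: e.start]  # drop everything from the first invalid byte
print(content.decode())
#> h
```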
--- pydantic_ai_slim-0.4.1/pydantic_ai/models/google.py
+++ pydantic_ai_slim-0.4.2/pydantic_ai/models/google.py
@@ -151,6 +151,7 @@ class GoogleModel(Model):
         *,
         provider: Literal['google-gla', 'google-vertex'] | Provider[genai.Client] = 'google-gla',
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize a Gemini model.
 
@@ -160,6 +161,7 @@
                 'google-gla' or 'google-vertex' or an instance of `Provider[httpx.AsyncClient]`.
                 If not provided, a new provider will be created using the other parameters.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
+            settings: The model settings to use. Defaults to None.
         """
         self._model_name = model_name
 
@@ -169,7 +171,8 @@
         self._provider = provider
         self._system = provider.name
         self.client = provider.client
-        self._profile = profile or provider.model_profile
+
+        super().__init__(settings=settings, profile=profile or provider.model_profile)
 
     @property
     def base_url(self) -> str:
--- pydantic_ai_slim-0.4.1/pydantic_ai/models/groq.py
+++ pydantic_ai_slim-0.4.2/pydantic_ai/models/groq.py
@@ -120,6 +120,7 @@ class GroqModel(Model):
         *,
         provider: Literal['groq'] | Provider[AsyncGroq] = 'groq',
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize a Groq model.
 
@@ -130,13 +131,15 @@
                 'groq' or an instance of `Provider[AsyncGroq]`. If not provided, a new provider will be
                 created using the other parameters.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
+            settings: Model-specific settings that will be used as defaults for this model.
         """
         self._model_name = model_name
 
         if isinstance(provider, str):
             provider = infer_provider(provider)
         self.client = provider.client
-        self._profile = profile or provider.model_profile
+
+        super().__init__(settings=settings, profile=profile or provider.model_profile)
 
     @property
     def base_url(self) -> str:
--- pydantic_ai_slim-0.4.1/pydantic_ai/models/instrumented.py
+++ pydantic_ai_slim-0.4.2/pydantic_ai/models/instrumented.py
@@ -182,15 +182,15 @@ GEN_AI_SYSTEM_ATTRIBUTE = 'gen_ai.system'
 GEN_AI_REQUEST_MODEL_ATTRIBUTE = 'gen_ai.request.model'
 
 
-@dataclass
+@dataclass(init=False)
 class InstrumentedModel(WrapperModel):
     """Model which wraps another model so that requests are instrumented with OpenTelemetry.
 
     See the [Debugging and Monitoring guide](https://ai.pydantic.dev/logfire/) for more info.
     """
 
-    settings: InstrumentationSettings
-    """Configuration for instrumenting requests."""
+    instrumentation_settings: InstrumentationSettings
+    """Instrumentation settings for this model."""
 
     def __init__(
         self,
@@ -198,7 +198,7 @@ class InstrumentedModel(WrapperModel):
         options: InstrumentationSettings | None = None,
     ) -> None:
         super().__init__(wrapped)
-        self.settings = options or InstrumentationSettings()
+        self.instrumentation_settings = options or InstrumentationSettings()
 
     async def request(
         self,
@@ -260,7 +260,7 @@ class InstrumentedModel(WrapperModel):
 
         record_metrics: Callable[[], None] | None = None
         try:
-            with self.settings.tracer.start_as_current_span(span_name, attributes=attributes) as span:
+            with self.instrumentation_settings.tracer.start_as_current_span(span_name, attributes=attributes) as span:
 
                 def finish(response: ModelResponse):
                     # FallbackModel updates these span attributes.
@@ -278,12 +278,12 @@ class InstrumentedModel(WrapperModel):
                         'gen_ai.response.model': response_model,
                     }
                     if response.usage.request_tokens:  # pragma: no branch
-                        self.settings.tokens_histogram.record(
+                        self.instrumentation_settings.tokens_histogram.record(
                             response.usage.request_tokens,
                             {**metric_attributes, 'gen_ai.token.type': 'input'},
                         )
                     if response.usage.response_tokens:  # pragma: no branch
-                        self.settings.tokens_histogram.record(
+                        self.instrumentation_settings.tokens_histogram.record(
                             response.usage.response_tokens,
                             {**metric_attributes, 'gen_ai.token.type': 'output'},
                         )
@@ -294,8 +294,8 @@ class InstrumentedModel(WrapperModel):
                 if not span.is_recording():
                     return
 
-                events = self.settings.messages_to_otel_events(messages)
-                for event in self.settings.messages_to_otel_events([response]):
+                events = self.instrumentation_settings.messages_to_otel_events(messages)
+                for event in self.instrumentation_settings.messages_to_otel_events([response]):
                     events.append(
                         Event(
                             'gen_ai.choice',
@@ -328,9 +328,9 @@ class InstrumentedModel(WrapperModel):
             record_metrics()
 
     def _emit_events(self, span: Span, events: list[Event]) -> None:
-        if self.settings.event_mode == 'logs':
+        if self.instrumentation_settings.event_mode == 'logs':
             for event in events:
-                self.settings.event_logger.emit(event)
+                self.instrumentation_settings.event_logger.emit(event)
         else:
             attr_name = 'events'
             span.set_attributes(
--- pydantic_ai_slim-0.4.1/pydantic_ai/models/mistral.py
+++ pydantic_ai_slim-0.4.2/pydantic_ai/models/mistral.py
@@ -125,6 +125,7 @@ class MistralModel(Model):
         provider: Literal['mistral'] | Provider[Mistral] = 'mistral',
         profile: ModelProfileSpec | None = None,
        json_mode_schema_prompt: str = """Answer in JSON Object, respect the format:\n```\n{schema}\n```\n""",
+        settings: ModelSettings | None = None,
     ):
         """Initialize a Mistral model.
 
@@ -135,6 +136,7 @@
                 created using the other parameters.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
             json_mode_schema_prompt: The prompt to show when the model expects a JSON object as input.
+            settings: Model-specific settings that will be used as defaults for this model.
         """
         self._model_name = model_name
         self.json_mode_schema_prompt = json_mode_schema_prompt
@@ -142,7 +144,8 @@
         if isinstance(provider, str):
             provider = infer_provider(provider)
         self.client = provider.client
-        self._profile = profile or provider.model_profile
+
+        super().__init__(settings=settings, profile=profile or provider.model_profile)
 
     @property
     def base_url(self) -> str:
--- pydantic_ai_slim-0.4.1/pydantic_ai/models/openai.py
+++ pydantic_ai_slim-0.4.2/pydantic_ai/models/openai.py
@@ -195,6 +195,7 @@ class OpenAIModel(Model):
         | Provider[AsyncOpenAI] = 'openai',
         profile: ModelProfileSpec | None = None,
         system_prompt_role: OpenAISystemPromptRole | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize an OpenAI model.
 
@@ -206,16 +207,18 @@
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
             system_prompt_role: The role to use for the system prompt message. If not provided, defaults to `'system'`.
                 In the future, this may be inferred from the model name.
+            settings: Default model settings for this model instance.
         """
         self._model_name = model_name
 
         if isinstance(provider, str):
             provider = infer_provider(provider)
         self.client = provider.client
-        self._profile = profile or provider.model_profile
 
         self.system_prompt_role = system_prompt_role
 
+        super().__init__(settings=settings, profile=profile or provider.model_profile)
+
     @property
     def base_url(self) -> str:
         return str(self.client.base_url)
@@ -598,6 +601,7 @@ class OpenAIResponsesModel(Model):
         provider: Literal['openai', 'deepseek', 'azure', 'openrouter', 'grok', 'fireworks', 'together']
         | Provider[AsyncOpenAI] = 'openai',
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize an OpenAI Responses model.
 
@@ -605,13 +609,15 @@
             model_name: The name of the OpenAI model to use.
             provider: The provider to use. Defaults to `'openai'`.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
+            settings: Default model settings for this model instance.
         """
         self._model_name = model_name
 
         if isinstance(provider, str):
             provider = infer_provider(provider)
         self.client = provider.client
-        self._profile = profile or provider.model_profile
+
+        super().__init__(settings=settings, profile=profile or provider.model_profile)
 
     @property
     def model_name(self) -> OpenAIModelName:
@@ -988,6 +994,12 @@ class OpenAIStreamedResponse(StreamedResponse):
             if content is not None:
                 yield self._parts_manager.handle_text_delta(vendor_part_id='content', content=content)
 
+            # Handle reasoning part of the response, present in DeepSeek models
+            if reasoning_content := getattr(choice.delta, 'reasoning_content', None):
+                yield self._parts_manager.handle_thinking_delta(
+                    vendor_part_id='reasoning_content', content=reasoning_content
+                )
+
             for dtc in choice.delta.tool_calls or []:
                 maybe_event = self._parts_manager.handle_tool_call_delta(
                     vendor_part_id=dtc.index,
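With the branch above, `reasoning_content` deltas (emitted by DeepSeek's reasoner models through their OpenAI-compatible API) are surfaced as thinking parts while streaming. A rough sketch of observing them; the model name, API access, and exact message inspection are assumptions, not guaranteed by this diff:

```python
import asyncio

from pydantic_ai import Agent
from pydantic_ai.messages import ThinkingPart

agent = Agent('deepseek:deepseek-reasoner')  # requires DEEPSEEK_API_KEY

async def main():
    async with agent.run_stream('Why is the sky blue?') as result:
        async for _ in result.stream_text():
            pass
        # The final response message should now carry ThinkingPart entries
        # assembled from the streamed reasoning_content deltas.
        for part in result.all_messages()[-1].parts:
            if isinstance(part, ThinkingPart):
                print(part.content)

asyncio.run(main())
```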
--- pydantic_ai_slim-0.4.1/pydantic_ai/models/test.py
+++ pydantic_ai_slim-0.4.2/pydantic_ai/models/test.py
@@ -24,6 +24,7 @@ from ..messages import (
     ToolCallPart,
     ToolReturnPart,
 )
+from ..profiles import ModelProfileSpec
 from ..settings import ModelSettings
 from ..tools import ToolDefinition
 from ..usage import Usage
@@ -45,7 +46,7 @@ class _WrappedToolOutput:
     value: Any | None
 
 
-@dataclass
+@dataclass(init=False)
 class TestModel(Model):
     """A model specifically for testing purposes.
 
@@ -79,6 +80,26 @@ class TestModel(Model):
     _model_name: str = field(default='test', repr=False)
     _system: str = field(default='test', repr=False)
 
+    def __init__(
+        self,
+        *,
+        call_tools: list[str] | Literal['all'] = 'all',
+        custom_output_text: str | None = None,
+        custom_output_args: Any | None = None,
+        seed: int = 0,
+        profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
+    ):
+        """Initialize TestModel with optional settings and profile."""
+        self.call_tools = call_tools
+        self.custom_output_text = custom_output_text
+        self.custom_output_args = custom_output_args
+        self.seed = seed
+        self.last_model_request_parameters = None
+        self._model_name = 'test'
+        self._system = 'test'
+        super().__init__(settings=settings, profile=profile)
+
     async def request(
         self,
         messages: list[ModelMessage],
--- pydantic_ai_slim-0.4.1/pydantic_ai/models/wrapper.py
+++ pydantic_ai_slim-0.4.2/pydantic_ai/models/wrapper.py
@@ -23,6 +23,7 @@ class WrapperModel(Model):
     """The underlying model being wrapped."""
 
     def __init__(self, wrapped: Model | KnownModelName):
+        super().__init__()
         self.wrapped = infer_model(wrapped)
 
     async def request(self, *args: Any, **kwargs: Any) -> ModelResponse:
@@ -53,5 +54,10 @@ class WrapperModel(Model):
     def profile(self) -> ModelProfile:
         return self.wrapped.profile
 
+    @property
+    def settings(self) -> ModelSettings | None:
+        """Get the settings from the wrapped model."""
+        return self.wrapped.settings
+
     def __getattr__(self, item: str):
         return getattr(self.wrapped, item)  # pragma: no cover
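Because `InstrumentedModel` and other subclasses inherit this property, wrapping a model no longer hides its default settings. A brief sketch, reusing `TestModel` from this diff for illustration:

```python
from pydantic_ai.models.instrumented import InstrumentedModel
from pydantic_ai.models.test import TestModel
from pydantic_ai.settings import ModelSettings

inner = TestModel(settings=ModelSettings(max_tokens=16))
wrapped = InstrumentedModel(inner)
print(wrapped.settings)  # delegated to the wrapped model
#> {'max_tokens': 16}
```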
--- pydantic_ai_slim-0.4.1/pydantic_ai/output.py
+++ pydantic_ai_slim-0.4.2/pydantic_ai/output.py
@@ -2,10 +2,14 @@ from __future__ import annotations
 
 from collections.abc import Awaitable, Sequence
 from dataclasses import dataclass
-from typing import Callable, Generic, Literal, Union
+from typing import Any, Callable, Generic, Literal, Union
 
+from pydantic import GetCoreSchemaHandler, GetJsonSchemaHandler
+from pydantic.json_schema import JsonSchemaValue
+from pydantic_core import core_schema
 from typing_extensions import TypeAliasType, TypeVar
 
+from . import _utils
 from .tools import RunContext
 
 __all__ = (
@@ -14,6 +18,7 @@ __all__ = (
     'NativeOutput',
     'PromptedOutput',
     'TextOutput',
+    'StructuredDict',
     # types
     'OutputDataT',
     'OutputMode',
@@ -266,6 +271,65 @@ class TextOutput(Generic[OutputDataT]):
     """The function that will be called to process the model's plain text output. The function must take a single string argument."""
 
 
+def StructuredDict(
+    json_schema: JsonSchemaValue, name: str | None = None, description: str | None = None
+) -> type[JsonSchemaValue]:
+    """Returns a `dict[str, Any]` subclass with a JSON schema attached that will be used for structured output.
+
+    Args:
+        json_schema: A JSON schema of type `object` defining the structure of the dictionary content.
+        name: Optional name of the structured output. If not provided, the `title` field of the JSON schema will be used if it's present.
+        description: Optional description of the structured output. If not provided, the `description` field of the JSON schema will be used if it's present.
+
+    Example:
+    ```python {title="structured_dict.py"}
+    from pydantic_ai import Agent, StructuredDict
+
+
+    schema = {
+        "type": "object",
+        "properties": {
+            "name": {"type": "string"},
+            "age": {"type": "integer"}
+        },
+        "required": ["name", "age"]
+    }
+
+    agent = Agent('openai:gpt-4o', output_type=StructuredDict(schema))
+    result = agent.run_sync("Create a person")
+    print(result.output)
+    #> {'name': 'John Doe', 'age': 30}
+    ```
+    """
+    json_schema = _utils.check_object_json_schema(json_schema)
+
+    if name:
+        json_schema['title'] = name
+
+    if description:
+        json_schema['description'] = description
+
+    class _StructuredDict(JsonSchemaValue):
+        __is_model_like__ = True
+
+        @classmethod
+        def __get_pydantic_core_schema__(
+            cls, source_type: Any, handler: GetCoreSchemaHandler
+        ) -> core_schema.CoreSchema:
+            return core_schema.dict_schema(
+                keys_schema=core_schema.str_schema(),
+                values_schema=core_schema.any_schema(),
+            )
+
+        @classmethod
+        def __get_pydantic_json_schema__(
+            cls, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler
+        ) -> JsonSchemaValue:
+            return json_schema
+
+    return _StructuredDict
+
+
 OutputSpec = TypeAliasType(
     'OutputSpec',
     Union[
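Beyond the docstring example above, the optional `name` and `description` parameters set the schema's `title` and `description` before it is handed to the model. An illustrative call (the field names are made up):

```python
from pydantic_ai import StructuredDict

PersonDict = StructuredDict(
    {
        'type': 'object',
        'properties': {'name': {'type': 'string'}, 'age': {'type': 'integer'}},
        'required': ['name', 'age'],
    },
    name='Person',
    description='A person with a name and an age',
)
```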
--- pydantic_ai_slim-0.4.1/pyproject.toml
+++ pydantic_ai_slim-0.4.2/pyproject.toml
@@ -79,7 +79,7 @@ mcp = ["mcp>=1.9.4; python_version >= '3.10'"]
 # Evals
 evals = ["pydantic-evals=={{ version }}"]
 # A2A
-a2a = ["fasta2a=={{ version }}"]
+a2a = ["fasta2a>=0.4.1"]
 
 [dependency-groups]
 dev = [