pydantic-ai-slim 0.4.0__py3-none-any.whl → 0.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

pydantic_ai/__init__.py CHANGED
@@ -12,7 +12,7 @@ from .exceptions import (
 )
 from .format_prompt import format_as_xml
 from .messages import AudioUrl, BinaryContent, DocumentUrl, ImageUrl, VideoUrl
-from .output import NativeOutput, PromptedOutput, TextOutput, ToolOutput
+from .output import NativeOutput, PromptedOutput, StructuredDict, TextOutput, ToolOutput
 from .tools import RunContext, Tool
 
 __all__ = (
@@ -46,6 +46,7 @@ __all__ = (
     'NativeOutput',
     'PromptedOutput',
     'TextOutput',
+    'StructuredDict',
     # format_prompt
     'format_as_xml',
 )
pydantic_ai/_a2a.py CHANGED
@@ -1,11 +1,13 @@
 from __future__ import annotations as _annotations
 
+import uuid
 from collections.abc import AsyncIterator, Sequence
 from contextlib import asynccontextmanager
 from dataclasses import dataclass
 from functools import partial
-from typing import Any, Generic
+from typing import Any, Generic, TypeVar
 
+from pydantic import TypeAdapter
 from typing_extensions import assert_never
 
 from pydantic_ai.messages import (
@@ -19,24 +21,26 @@ from pydantic_ai.messages import (
     ModelResponse,
     ModelResponsePart,
     TextPart,
+    ThinkingPart,
+    ToolCallPart,
     UserPromptPart,
     VideoUrl,
 )
 
 from .agent import Agent, AgentDepsT, OutputDataT
 
-try:
-    from starlette.middleware import Middleware
-    from starlette.routing import Route
-    from starlette.types import ExceptionHandler, Lifespan
+# AgentWorker output type needs to be invariant for use in both parameter and return positions
+WorkerOutputT = TypeVar('WorkerOutputT')
 
+try:
     from fasta2a.applications import FastA2A
     from fasta2a.broker import Broker, InMemoryBroker
     from fasta2a.schema import (
+        AgentProvider,
         Artifact,
+        DataPart,
         Message,
         Part,
-        Provider,
         Skill,
         TaskIdParams,
         TaskSendParams,
@@ -44,6 +48,9 @@ try:
     )
     from fasta2a.storage import InMemoryStorage, Storage
    from fasta2a.worker import Worker
+    from starlette.middleware import Middleware
+    from starlette.routing import Route
+    from starlette.types import ExceptionHandler, Lifespan
 except ImportError as _import_error:
     raise ImportError(
         'Please install the `fasta2a` package to use `Agent.to_a2a()` method, '
@@ -72,7 +79,7 @@ def agent_to_a2a(
     url: str = 'http://localhost:8000',
     version: str = '1.0.0',
     description: str | None = None,
-    provider: Provider | None = None,
+    provider: AgentProvider | None = None,
     skills: list[Skill] | None = None,
     # Starlette
     debug: bool = False,
@@ -106,59 +113,121 @@
 
 
 @dataclass
-class AgentWorker(Worker, Generic[AgentDepsT, OutputDataT]):
+class AgentWorker(Worker[list[ModelMessage]], Generic[WorkerOutputT, AgentDepsT]):
     """A worker that uses an agent to execute tasks."""
 
-    agent: Agent[AgentDepsT, OutputDataT]
+    agent: Agent[AgentDepsT, WorkerOutputT]
 
     async def run_task(self, params: TaskSendParams) -> None:
-        task = await self.storage.load_task(params['id'], history_length=params.get('history_length'))
-        assert task is not None, f'Task {params["id"]} not found'
-        assert 'session_id' in task, 'Task must have a session_id'
+        task = await self.storage.load_task(params['id'])
+        if task is None:
+            raise ValueError(f'Task {params["id"]} not found')  # pragma: no cover
+
+        # TODO(Marcelo): Should we lock `run_task` on the `context_id`?
+        # Ensure this task hasn't been run before
+        if task['status']['state'] != 'submitted':
+            raise ValueError(  # pragma: no cover
+                f'Task {params["id"]} has already been processed (state: {task["status"]["state"]})'
+            )
 
         await self.storage.update_task(task['id'], state='working')
 
-        # TODO(Marcelo): We need to have a way to communicate when the task is set to `input-required`. Maybe
-        # a custom `output_type` with a `more_info_required` field, or something like that.
+        # Load context - contains pydantic-ai message history from previous tasks in this conversation
+        message_history = await self.storage.load_context(task['context_id']) or []
+        message_history.extend(self.build_message_history(task.get('history', [])))
+
+        try:
+            result = await self.agent.run(message_history=message_history)  # type: ignore
 
-        task_history = task.get('history', [])
-        message_history = self.build_message_history(task_history=task_history)
+            await self.storage.update_context(task['context_id'], result.all_messages())
 
-        # TODO(Marcelo): We need to make this more customizable e.g. pass deps.
-        result = await self.agent.run(message_history=message_history)  # type: ignore
+            # Convert new messages to A2A format for task history
+            a2a_messages: list[Message] = []
 
-        artifacts = self.build_artifacts(result.output)
-        await self.storage.update_task(task['id'], state='completed', artifacts=artifacts)
+            for message in result.new_messages():
+                if isinstance(message, ModelRequest):
+                    # Skip user prompts - they're already in task history
+                    continue
+                else:
+                    # Convert response parts to A2A format
+                    a2a_parts = self._response_parts_to_a2a(message.parts)
+                    if a2a_parts:  # Add if there are visible parts (text/thinking)
+                        a2a_messages.append(
+                            Message(role='agent', parts=a2a_parts, kind='message', message_id=str(uuid.uuid4()))
+                        )
+
+            artifacts = self.build_artifacts(result.output)
+        except Exception:
+            await self.storage.update_task(task['id'], state='failed')
+            raise
+        else:
+            await self.storage.update_task(
+                task['id'], state='completed', new_artifacts=artifacts, new_messages=a2a_messages
+            )
 
     async def cancel_task(self, params: TaskIdParams) -> None:
         pass
 
-    def build_artifacts(self, result: Any) -> list[Artifact]:
-        # TODO(Marcelo): We need to send the json schema of the result on the metadata of the message.
-        return [Artifact(name='result', index=0, parts=[A2ATextPart(type='text', text=str(result))])]
+    def build_artifacts(self, result: WorkerOutputT) -> list[Artifact]:
+        """Build artifacts from agent result.
+
+        All agent outputs become artifacts to mark them as durable task outputs.
+        For string results, we use TextPart. For structured data, we use DataPart.
+        Metadata is included to preserve type information.
+        """
+        artifact_id = str(uuid.uuid4())
+        part = self._convert_result_to_part(result)
+        return [Artifact(artifact_id=artifact_id, name='result', parts=[part])]
 
-    def build_message_history(self, task_history: list[Message]) -> list[ModelMessage]:
+    def _convert_result_to_part(self, result: WorkerOutputT) -> Part:
+        """Convert agent result to a Part (TextPart or DataPart).
+
+        For string results, returns a TextPart.
+        For structured data, returns a DataPart with properly serialized data.
+        """
+        if isinstance(result, str):
+            return A2ATextPart(kind='text', text=result)
+        else:
+            output_type = type(result)
+            type_adapter = TypeAdapter(output_type)
+            data = type_adapter.dump_python(result, mode='json')
+            json_schema = type_adapter.json_schema(mode='serialization')
+            return DataPart(kind='data', data={'result': data}, metadata={'json_schema': json_schema})
+
+    def build_message_history(self, history: list[Message]) -> list[ModelMessage]:
         model_messages: list[ModelMessage] = []
-        for message in task_history:
+        for message in history:
             if message['role'] == 'user':
-                model_messages.append(ModelRequest(parts=self._map_request_parts(message['parts'])))
+                model_messages.append(ModelRequest(parts=self._request_parts_from_a2a(message['parts'])))
             else:
-                model_messages.append(ModelResponse(parts=self._map_response_parts(message['parts'])))
+                model_messages.append(ModelResponse(parts=self._response_parts_from_a2a(message['parts'])))
         return model_messages
 
-    def _map_request_parts(self, parts: list[Part]) -> list[ModelRequestPart]:
+    def _request_parts_from_a2a(self, parts: list[Part]) -> list[ModelRequestPart]:
+        """Convert A2A Part objects to pydantic-ai ModelRequestPart objects.
+
+        This handles the conversion from A2A protocol parts (text, file, data) to
+        pydantic-ai's internal request parts (UserPromptPart with various content types).
+
+        Args:
+            parts: List of A2A Part objects from incoming messages
+
+        Returns:
+            List of ModelRequestPart objects for the pydantic-ai agent
+        """
         model_parts: list[ModelRequestPart] = []
         for part in parts:
-            if part['type'] == 'text':
+            if part['kind'] == 'text':
                 model_parts.append(UserPromptPart(content=part['text']))
-            elif part['type'] == 'file':
-                file = part['file']
-                if 'data' in file:
-                    data = file['data'].encode('utf-8')
-                    content = BinaryContent(data=data, media_type=file['mime_type'])
+            elif part['kind'] == 'file':
+                file_content = part['file']
+                if 'bytes' in file_content:
+                    data = file_content['bytes'].encode('utf-8')
+                    mime_type = file_content.get('mime_type', 'application/octet-stream')
+                    content = BinaryContent(data=data, media_type=mime_type)
                     model_parts.append(UserPromptPart(content=[content]))
                 else:
-                    url = file['url']
+                    url = file_content['uri']
                     for url_cls in (DocumentUrl, AudioUrl, ImageUrl, VideoUrl):
                         content = url_cls(url=url)
                         try:
@@ -168,24 +237,68 @@ class AgentWorker(Worker, Generic[AgentDepsT, OutputDataT]):
                     else:
                         break
                 else:
-                    raise ValueError(f'Unknown file type: {file["mime_type"]}')  # pragma: no cover
+                    raise ValueError(f'Unsupported file type: {url}')  # pragma: no cover
                 model_parts.append(UserPromptPart(content=[content]))
-            elif part['type'] == 'data':
-                # TODO(Marcelo): Maybe we should use this for `ToolReturnPart`, and `RetryPromptPart`.
+            elif part['kind'] == 'data':
                 raise NotImplementedError('Data parts are not supported yet.')
             else:
                 assert_never(part)
         return model_parts
 
-    def _map_response_parts(self, parts: list[Part]) -> list[ModelResponsePart]:
+    def _response_parts_from_a2a(self, parts: list[Part]) -> list[ModelResponsePart]:
+        """Convert A2A Part objects to pydantic-ai ModelResponsePart objects.
+
+        This handles the conversion from A2A protocol parts (text, file, data) to
+        pydantic-ai's internal response parts. Currently only supports text parts
+        as agent responses in A2A are expected to be text-based.
+
+        Args:
+            parts: List of A2A Part objects from stored agent messages
+
+        Returns:
+            List of ModelResponsePart objects for message history
+        """
         model_parts: list[ModelResponsePart] = []
         for part in parts:
-            if part['type'] == 'text':
+            if part['kind'] == 'text':
                 model_parts.append(TextPart(content=part['text']))
-            elif part['type'] == 'file':  # pragma: no cover
+            elif part['kind'] == 'file':  # pragma: no cover
                 raise NotImplementedError('File parts are not supported yet.')
-            elif part['type'] == 'data':  # pragma: no cover
+            elif part['kind'] == 'data':  # pragma: no cover
                 raise NotImplementedError('Data parts are not supported yet.')
             else:  # pragma: no cover
                 assert_never(part)
         return model_parts
+
+    def _response_parts_to_a2a(self, parts: list[ModelResponsePart]) -> list[Part]:
+        """Convert pydantic-ai ModelResponsePart objects to A2A Part objects.
+
+        This handles the conversion from pydantic-ai's internal response parts to
+        A2A protocol parts. Different part types are handled as follows:
+        - TextPart: Converted directly to A2A TextPart
+        - ThinkingPart: Converted to TextPart with metadata indicating it's thinking
+        - ToolCallPart: Skipped (internal to agent execution)
+
+        Args:
+            parts: List of ModelResponsePart objects from agent response
+
+        Returns:
+            List of A2A Part objects suitable for sending via A2A protocol
+        """
+        a2a_parts: list[Part] = []
+        for part in parts:
+            if isinstance(part, TextPart):
+                a2a_parts.append(A2ATextPart(kind='text', text=part.content))
+            elif isinstance(part, ThinkingPart):
+                # Convert thinking to text with metadata
+                a2a_parts.append(
+                    A2ATextPart(
+                        kind='text',
+                        text=part.content,
+                        metadata={'type': 'thinking', 'thinking_id': part.id, 'signature': part.signature},
+                    )
+                )
+            elif isinstance(part, ToolCallPart):
+                # Skip tool calls - they're internal to agent execution
                pass
+        return a2a_parts
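Taken together, the worker now persists pydantic-ai message history per `context_id` and reports structured outputs as `DataPart` artifacts instead of stringifying everything. A minimal usage sketch of how this surfaces to users (assumes the `a2a` extra, i.e. `pip install 'pydantic-ai-slim[a2a]'`, and an ASGI server such as uvicorn):

    from pydantic_ai import Agent

    agent = Agent('openai:gpt-4o', instructions='Be concise.')

    # to_a2a() wraps the agent in an AgentWorker and returns a FastA2A
    # (Starlette) app; run it with e.g. `uvicorn my_module:app --port 8000`.
    app = agent.to_a2a()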
pydantic_ai/_output.py CHANGED
@@ -264,10 +264,16 @@ class OutputSchema(BaseOutputSchema[OutputDataT], ABC):
 
             output = output.output
 
+        description = description or default_description
+        if strict is None:
+            strict = default_strict
+
+        processor = ObjectOutputProcessor(output=output, description=description, strict=strict)
+
         if name is None:
             name = default_name
             if multiple:
-                name += f'_{output.__name__}'
+                name += f'_{processor.object_def.name}'
 
         i = 1
         original_name = name
@@ -275,11 +281,6 @@ class OutputSchema(BaseOutputSchema[OutputDataT], ABC):
            i += 1
            name = f'{original_name}_{i}'
 
-        description = description or default_description
-        if strict is None:
-            strict = default_strict
-
-        processor = ObjectOutputProcessor(output=output, description=description, strict=strict)
         tools[name] = OutputTool(name=name, processor=processor, multiple=multiple)
 
         return tools
@@ -616,6 +617,9 @@ class ObjectOutputProcessor(BaseOutputProcessor[OutputDataT]):
            # including `response_data_typed_dict` as a title here doesn't add anything and could confuse the LLM
            json_schema.pop('title')
 
+        if name is None and (json_schema_title := json_schema.get('title', None)):
+            name = json_schema_title
+
         if json_schema_description := json_schema.pop('description', None):
             if description is None:
                 description = json_schema_description
pydantic_ai/_utils.py CHANGED
@@ -60,7 +60,12 @@ def is_model_like(type_: Any) -> bool:
     return (
         isinstance(type_, type)
         and not isinstance(type_, GenericAlias)
-        and (issubclass(type_, BaseModel) or is_dataclass(type_) or is_typeddict(type_))  # pyright: ignore[reportUnknownArgumentType]
+        and (
+            issubclass(type_, BaseModel)
+            or is_dataclass(type_)  # pyright: ignore[reportUnknownArgumentType]
+            or is_typeddict(type_)  # pyright: ignore[reportUnknownArgumentType]
+            or getattr(type_, '__is_model_like__', False)  # pyright: ignore[reportUnknownArgumentType]
+        )
     )
 
 
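The new `getattr(type_, '__is_model_like__', False)` branch is an opt-in marker: any class that sets the attribute is treated as model-like without being a `BaseModel`, dataclass, or `TypedDict`. This is what lets the `StructuredDict` factory in `pydantic_ai/output.py` (further down) register a plain `dict[str, Any]` subclass as a structured output type. An illustrative sketch (`MarkedDict` is hypothetical, and `is_model_like` is a private helper):

    from pydantic_ai._utils import is_model_like

    class MarkedDict(dict):
        __is_model_like__ = True

    assert is_model_like(MarkedDict)  # True, via the new marker branch
    assert not is_model_like(dict)    # plain dict is unchanged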
pydantic_ai/agent.py CHANGED
@@ -57,14 +57,14 @@ ModelRequestNode = _agent_graph.ModelRequestNode
 UserPromptNode = _agent_graph.UserPromptNode
 
 if TYPE_CHECKING:
+    from fasta2a.applications import FastA2A
+    from fasta2a.broker import Broker
+    from fasta2a.schema import AgentProvider, Skill
+    from fasta2a.storage import Storage
     from starlette.middleware import Middleware
     from starlette.routing import Route
     from starlette.types import ExceptionHandler, Lifespan
 
-    from fasta2a.applications import FastA2A
-    from fasta2a.broker import Broker
-    from fasta2a.schema import Provider, Skill
-    from fasta2a.storage import Storage
     from pydantic_ai.mcp import MCPServer
 
 
@@ -500,7 +500,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
     @overload
     def iter(
         self,
-        user_prompt: str | Sequence[_messages.UserContent] | None,
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
         *,
         output_type: None = None,
         message_history: list[_messages.ModelMessage] | None = None,
@@ -516,7 +516,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
     @overload
     def iter(
         self,
-        user_prompt: str | Sequence[_messages.UserContent] | None,
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
         *,
         output_type: OutputSpec[RunOutputDataT],
         message_history: list[_messages.ModelMessage] | None = None,
@@ -533,7 +533,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
     @deprecated('`result_type` is deprecated, use `output_type` instead.')
     def iter(
         self,
-        user_prompt: str | Sequence[_messages.UserContent] | None,
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
         *,
         result_type: type[RunOutputDataT],
         message_history: list[_messages.ModelMessage] | None = None,
@@ -674,12 +674,14 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         # typecast reasonable, even though it is possible to violate it with otherwise-type-checked code.
         output_validators = cast(list[_output.OutputValidator[AgentDepsT, RunOutputDataT]], self._output_validators)
 
-        model_settings = merge_model_settings(self.model_settings, model_settings)
+        # Merge model settings in order of precedence: run > agent > model
+        merged_settings = merge_model_settings(model_used.settings, self.model_settings)
+        model_settings = merge_model_settings(merged_settings, model_settings)
         usage_limits = usage_limits or _usage.UsageLimits()
 
         if isinstance(model_used, InstrumentedModel):
-            instrumentation_settings = model_used.settings
-            tracer = model_used.settings.tracer
+            instrumentation_settings = model_used.instrumentation_settings
+            tracer = model_used.instrumentation_settings.tracer
         else:
             instrumentation_settings = None
             tracer = NoOpTracer()
@@ -1764,7 +1766,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         url: str = 'http://localhost:8000',
         version: str = '1.0.0',
         description: str | None = None,
-        provider: Provider | None = None,
+        provider: AgentProvider | None = None,
         skills: list[Skill] | None = None,
         # Starlette
         debug: bool = False,
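The settings merge above establishes a three-level precedence: run-level `model_settings` override agent-level `model_settings`, which override the defaults attached to the model itself (the new `Model.settings` property in the next file). A sketch of the behavior, with illustrative values:

    from pydantic_ai import Agent
    from pydantic_ai.models.openai import OpenAIModel
    from pydantic_ai.settings import ModelSettings

    model = OpenAIModel('gpt-4o', settings=ModelSettings(temperature=1.0))
    agent = Agent(model, model_settings=ModelSettings(temperature=0.5))

    # The run-level value wins, so the request goes out with temperature=0.1;
    # dropping this argument would fall back to 0.5, then to 1.0.
    result = agent.run_sync('Say hi', model_settings=ModelSettings(temperature=0.1))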
pydantic_ai/models/__init__.py CHANGED
@@ -321,6 +321,27 @@ class Model(ABC):
     """Abstract class for a model."""
 
     _profile: ModelProfileSpec | None = None
+    _settings: ModelSettings | None = None
+
+    def __init__(
+        self,
+        *,
+        settings: ModelSettings | None = None,
+        profile: ModelProfileSpec | None = None,
+    ) -> None:
+        """Initialize the model with optional settings and profile.
+
+        Args:
+            settings: Model-specific settings that will be used as defaults for this model.
+            profile: The model profile to use.
+        """
+        self._settings = settings
+        self._profile = profile
+
+    @property
+    def settings(self) -> ModelSettings | None:
+        """Get the model settings."""
+        return self._settings
 
     @abstractmethod
     async def request(
pydantic_ai/models/anthropic.py CHANGED
@@ -127,6 +127,7 @@ class AnthropicModel(Model):
         *,
         provider: Literal['anthropic'] | Provider[AsyncAnthropic] = 'anthropic',
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize an Anthropic model.
 
@@ -136,13 +137,15 @@ class AnthropicModel(Model):
             provider: The provider to use for the Anthropic API. Can be either the string 'anthropic' or an
                 instance of `Provider[AsyncAnthropic]`. If not provided, the other parameters will be used.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
+            settings: Default model settings for this model instance.
         """
         self._model_name = model_name
 
         if isinstance(provider, str):
             provider = infer_provider(provider)
         self.client = provider.client
-        self._profile = profile or provider.model_profile
+
+        super().__init__(settings=settings, profile=profile or provider.model_profile)
 
     @property
     def base_url(self) -> str:
pydantic_ai/models/bedrock.py CHANGED
@@ -202,6 +202,7 @@ class BedrockConverseModel(Model):
         *,
         provider: Literal['bedrock'] | Provider[BaseClient] = 'bedrock',
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize a Bedrock model.
 
@@ -213,13 +214,15 @@ class BedrockConverseModel(Model):
                 'bedrock' or an instance of `Provider[BaseClient]`. If not provided, a new provider will be
                 created using the other parameters.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
+            settings: Model-specific settings that will be used as defaults for this model.
         """
         self._model_name = model_name
 
         if isinstance(provider, str):
             provider = infer_provider(provider)
         self.client = cast('BedrockRuntimeClient', provider.client)
-        self._profile = profile or provider.model_profile
+
+        super().__init__(settings=settings, profile=profile or provider.model_profile)
 
     def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[ToolTypeDef]:
         tools = [self._map_tool_definition(r) for r in model_request_parameters.function_tools]
pydantic_ai/models/cohere.py CHANGED
@@ -111,6 +111,7 @@ class CohereModel(Model):
         *,
         provider: Literal['cohere'] | Provider[AsyncClientV2] = 'cohere',
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize an Cohere model.
 
@@ -121,13 +122,15 @@ class CohereModel(Model):
                 'cohere' or an instance of `Provider[AsyncClientV2]`. If not provided, a new provider will be
                 created using the other parameters.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
+            settings: Model-specific settings that will be used as defaults for this model.
         """
         self._model_name = model_name
 
         if isinstance(provider, str):
             provider = infer_provider(provider)
         self.client = provider.client
-        self._profile = profile or provider.model_profile
+
+        super().__init__(settings=settings, profile=profile or provider.model_profile)
 
     @property
     def base_url(self) -> str:
pydantic_ai/models/fallback.py CHANGED
@@ -42,6 +42,7 @@ class FallbackModel(Model):
             fallback_models: The names or instances of the fallback models to use upon failure.
             fallback_on: A callable or tuple of exceptions that should trigger a fallback.
         """
+        super().__init__()
         self.models = [infer_model(default_model), *[infer_model(m) for m in fallback_models]]
 
         if isinstance(fallback_on, tuple):
pydantic_ai/models/function.py CHANGED
@@ -52,7 +52,12 @@ class FunctionModel(Model):
 
     @overload
     def __init__(
-        self, function: FunctionDef, *, model_name: str | None = None, profile: ModelProfileSpec | None = None
+        self,
+        function: FunctionDef,
+        *,
+        model_name: str | None = None,
+        profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ) -> None: ...
 
     @overload
@@ -62,6 +67,7 @@ class FunctionModel(Model):
         stream_function: StreamFunctionDef,
         model_name: str | None = None,
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ) -> None: ...
 
     @overload
@@ -72,6 +78,7 @@ class FunctionModel(Model):
         stream_function: StreamFunctionDef,
         model_name: str | None = None,
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ) -> None: ...
 
     def __init__(
@@ -81,6 +88,7 @@ class FunctionModel(Model):
         stream_function: StreamFunctionDef | None = None,
         model_name: str | None = None,
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize a `FunctionModel`.
 
@@ -91,16 +99,19 @@ class FunctionModel(Model):
             stream_function: The function to call for streamed requests.
             model_name: The name of the model. If not provided, a name is generated from the function names.
             profile: The model profile to use.
+            settings: Model-specific settings that will be used as defaults for this model.
         """
         if function is None and stream_function is None:
             raise TypeError('Either `function` or `stream_function` must be provided')
+
         self.function = function
         self.stream_function = stream_function
 
         function_name = self.function.__name__ if self.function is not None else ''
         stream_function_name = self.stream_function.__name__ if self.stream_function is not None else ''
         self._model_name = model_name or f'function:{function_name}:{stream_function_name}'
-        self._profile = profile
+
+        super().__init__(settings=settings, profile=profile)
 
     async def request(
         self,
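`FunctionModel` accepting `settings` makes model-level defaults easy to observe in tests, since the merged settings are handed to the function through `AgentInfo`. A sketch, assuming `AgentInfo.model_settings` carries the merged value:

    from pydantic_ai import Agent
    from pydantic_ai.messages import ModelMessage, ModelResponse, TextPart
    from pydantic_ai.models.function import AgentInfo, FunctionModel
    from pydantic_ai.settings import ModelSettings

    def echo_settings(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
        # With no agent- or run-level overrides, the model default arrives here.
        return ModelResponse(parts=[TextPart(content=str(info.model_settings))])

    model = FunctionModel(echo_settings, settings=ModelSettings(temperature=0.7))
    agent = Agent(model)
    print(agent.run_sync('hi').output)
    #> {'temperature': 0.7}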
pydantic_ai/models/gemini.py CHANGED
@@ -133,6 +133,7 @@ class GeminiModel(Model):
         *,
         provider: Literal['google-gla', 'google-vertex'] | Provider[httpx.AsyncClient] = 'google-gla',
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize a Gemini model.
 
@@ -142,6 +143,7 @@ class GeminiModel(Model):
                 'google-gla' or 'google-vertex' or an instance of `Provider[httpx.AsyncClient]`.
                 If not provided, a new provider will be created using the other parameters.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
+            settings: Default model settings for this model instance.
         """
         self._model_name = model_name
         self._provider = provider
@@ -151,7 +153,8 @@ class GeminiModel(Model):
         self._system = provider.name
         self.client = provider.client
         self._url = str(self.client.base_url)
-        self._profile = profile or provider.model_profile
+
+        super().__init__(settings=settings, profile=profile or provider.model_profile)
 
     @property
     def base_url(self) -> str:
@@ -921,10 +924,10 @@ def _ensure_decodeable(content: bytearray) -> bytearray:
 
     This is a temporary workaround until https://github.com/pydantic/pydantic-core/issues/1633 is resolved
     """
-    while True:
-        try:
-            content.decode()
-        except UnicodeDecodeError:
-            content = content[:-1]  # this will definitely succeed before we run out of bytes
-        else:
-            return content
+    try:
+        content.decode()
+    except UnicodeDecodeError as e:
+        # e.start marks the start of the invalid decoded bytes, so cut up to before the first invalid byte
+        return content[: e.start]
+    else:
+        return content
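The rewrite works because `UnicodeDecodeError.start` is the index of the first byte that failed to decode, so a single failed `decode()` identifies the same truncation point that the old loop found by stripping one byte at a time. A standalone illustration of that attribute:

    # b'hi' followed by the first byte of a two-byte UTF-8 sequence (0xC3)
    content = bytearray(b'hi\xc3')
    try:
        content.decode()
    except UnicodeDecodeError as e:
        print(e.start)             #> 2 (index of the first invalid byte)
        print(content[: e.start])  #> bytearray(b'hi')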
pydantic_ai/models/google.py CHANGED
@@ -151,6 +151,7 @@ class GoogleModel(Model):
         *,
         provider: Literal['google-gla', 'google-vertex'] | Provider[genai.Client] = 'google-gla',
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize a Gemini model.
 
@@ -160,6 +161,7 @@ class GoogleModel(Model):
                 'google-gla' or 'google-vertex' or an instance of `Provider[httpx.AsyncClient]`.
                 If not provided, a new provider will be created using the other parameters.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
+            settings: The model settings to use. Defaults to None.
         """
         self._model_name = model_name
 
@@ -169,7 +171,8 @@ class GoogleModel(Model):
         self._provider = provider
         self._system = provider.name
         self.client = provider.client
-        self._profile = profile or provider.model_profile
+
+        super().__init__(settings=settings, profile=profile or provider.model_profile)
 
     @property
     def base_url(self) -> str:
pydantic_ai/models/groq.py CHANGED
@@ -120,6 +120,7 @@ class GroqModel(Model):
         *,
         provider: Literal['groq'] | Provider[AsyncGroq] = 'groq',
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize a Groq model.
 
@@ -130,13 +131,15 @@ class GroqModel(Model):
                 'groq' or an instance of `Provider[AsyncGroq]`. If not provided, a new provider will be
                 created using the other parameters.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
+            settings: Model-specific settings that will be used as defaults for this model.
         """
         self._model_name = model_name
 
         if isinstance(provider, str):
             provider = infer_provider(provider)
         self.client = provider.client
-        self._profile = profile or provider.model_profile
+
+        super().__init__(settings=settings, profile=profile or provider.model_profile)
 
     @property
     def base_url(self) -> str:
pydantic_ai/models/instrumented.py CHANGED
@@ -182,15 +182,15 @@ GEN_AI_SYSTEM_ATTRIBUTE = 'gen_ai.system'
 GEN_AI_REQUEST_MODEL_ATTRIBUTE = 'gen_ai.request.model'
 
 
-@dataclass
+@dataclass(init=False)
 class InstrumentedModel(WrapperModel):
     """Model which wraps another model so that requests are instrumented with OpenTelemetry.
 
     See the [Debugging and Monitoring guide](https://ai.pydantic.dev/logfire/) for more info.
     """
 
-    settings: InstrumentationSettings
-    """Configuration for instrumenting requests."""
+    instrumentation_settings: InstrumentationSettings
+    """Instrumentation settings for this model."""
 
     def __init__(
         self,
@@ -198,7 +198,7 @@ class InstrumentedModel(WrapperModel):
         options: InstrumentationSettings | None = None,
     ) -> None:
         super().__init__(wrapped)
-        self.settings = options or InstrumentationSettings()
+        self.instrumentation_settings = options or InstrumentationSettings()
 
     async def request(
         self,
@@ -260,7 +260,7 @@ class InstrumentedModel(WrapperModel):
 
         record_metrics: Callable[[], None] | None = None
         try:
-            with self.settings.tracer.start_as_current_span(span_name, attributes=attributes) as span:
+            with self.instrumentation_settings.tracer.start_as_current_span(span_name, attributes=attributes) as span:
 
                 def finish(response: ModelResponse):
                     # FallbackModel updates these span attributes.
@@ -278,12 +278,12 @@ class InstrumentedModel(WrapperModel):
                         'gen_ai.response.model': response_model,
                     }
                    if response.usage.request_tokens:  # pragma: no branch
-                        self.settings.tokens_histogram.record(
+                        self.instrumentation_settings.tokens_histogram.record(
                             response.usage.request_tokens,
                             {**metric_attributes, 'gen_ai.token.type': 'input'},
                         )
                    if response.usage.response_tokens:  # pragma: no branch
-                        self.settings.tokens_histogram.record(
+                        self.instrumentation_settings.tokens_histogram.record(
                             response.usage.response_tokens,
                             {**metric_attributes, 'gen_ai.token.type': 'output'},
                         )
@@ -294,8 +294,8 @@ class InstrumentedModel(WrapperModel):
                    if not span.is_recording():
                        return
 
-                    events = self.settings.messages_to_otel_events(messages)
-                    for event in self.settings.messages_to_otel_events([response]):
+                    events = self.instrumentation_settings.messages_to_otel_events(messages)
+                    for event in self.instrumentation_settings.messages_to_otel_events([response]):
                         events.append(
                             Event(
                                 'gen_ai.choice',
@@ -328,9 +328,9 @@ class InstrumentedModel(WrapperModel):
                 record_metrics()
 
     def _emit_events(self, span: Span, events: list[Event]) -> None:
-        if self.settings.event_mode == 'logs':
+        if self.instrumentation_settings.event_mode == 'logs':
             for event in events:
-                self.settings.event_logger.emit(event)
+                self.instrumentation_settings.event_logger.emit(event)
         else:
             attr_name = 'events'
             span.set_attributes(
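Renaming the field frees up `settings` so that `InstrumentedModel` can inherit the new model-defaults semantics from `WrapperModel.settings` (below) without a name clash; OpenTelemetry configuration now lives exclusively on `instrumentation_settings`. A sketch of the distinction:

    from pydantic_ai.models.instrumented import InstrumentationSettings, InstrumentedModel
    from pydantic_ai.models.test import TestModel

    model = InstrumentedModel(TestModel(), InstrumentationSettings())
    print(model.settings)                  #> None (no defaults on the wrapped TestModel)
    print(model.instrumentation_settings)  # tracer/meter/event configuration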
pydantic_ai/models/mistral.py CHANGED
@@ -125,6 +125,7 @@ class MistralModel(Model):
         provider: Literal['mistral'] | Provider[Mistral] = 'mistral',
         profile: ModelProfileSpec | None = None,
         json_mode_schema_prompt: str = """Answer in JSON Object, respect the format:\n```\n{schema}\n```\n""",
+        settings: ModelSettings | None = None,
     ):
         """Initialize a Mistral model.
 
@@ -135,6 +136,7 @@ class MistralModel(Model):
                 created using the other parameters.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
             json_mode_schema_prompt: The prompt to show when the model expects a JSON object as input.
+            settings: Model-specific settings that will be used as defaults for this model.
         """
         self._model_name = model_name
         self.json_mode_schema_prompt = json_mode_schema_prompt
@@ -142,7 +144,8 @@ class MistralModel(Model):
         if isinstance(provider, str):
             provider = infer_provider(provider)
         self.client = provider.client
-        self._profile = profile or provider.model_profile
+
+        super().__init__(settings=settings, profile=profile or provider.model_profile)
 
     @property
     def base_url(self) -> str:
pydantic_ai/models/openai.py CHANGED
@@ -195,6 +195,7 @@ class OpenAIModel(Model):
         | Provider[AsyncOpenAI] = 'openai',
         profile: ModelProfileSpec | None = None,
         system_prompt_role: OpenAISystemPromptRole | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize an OpenAI model.
 
@@ -206,16 +207,18 @@ class OpenAIModel(Model):
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
             system_prompt_role: The role to use for the system prompt message. If not provided, defaults to `'system'`.
                 In the future, this may be inferred from the model name.
+            settings: Default model settings for this model instance.
         """
         self._model_name = model_name
 
         if isinstance(provider, str):
             provider = infer_provider(provider)
         self.client = provider.client
-        self._profile = profile or provider.model_profile
 
         self.system_prompt_role = system_prompt_role
 
+        super().__init__(settings=settings, profile=profile or provider.model_profile)
+
     @property
     def base_url(self) -> str:
         return str(self.client.base_url)
@@ -598,6 +601,7 @@ class OpenAIResponsesModel(Model):
         provider: Literal['openai', 'deepseek', 'azure', 'openrouter', 'grok', 'fireworks', 'together']
         | Provider[AsyncOpenAI] = 'openai',
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize an OpenAI Responses model.
 
@@ -605,13 +609,15 @@ class OpenAIResponsesModel(Model):
             model_name: The name of the OpenAI model to use.
             provider: The provider to use. Defaults to `'openai'`.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
+            settings: Default model settings for this model instance.
         """
         self._model_name = model_name
 
         if isinstance(provider, str):
             provider = infer_provider(provider)
         self.client = provider.client
-        self._profile = profile or provider.model_profile
+
+        super().__init__(settings=settings, profile=profile or provider.model_profile)
 
     @property
     def model_name(self) -> OpenAIModelName:
@@ -988,6 +994,12 @@ class OpenAIStreamedResponse(StreamedResponse):
             if content is not None:
                 yield self._parts_manager.handle_text_delta(vendor_part_id='content', content=content)
 
+            # Handle reasoning part of the response, present in DeepSeek models
+            if reasoning_content := getattr(choice.delta, 'reasoning_content', None):
+                yield self._parts_manager.handle_thinking_delta(
+                    vendor_part_id='reasoning_content', content=reasoning_content
+                )
+
             for dtc in choice.delta.tool_calls or []:
                 maybe_event = self._parts_manager.handle_tool_call_delta(
                     vendor_part_id=dtc.index,
pydantic_ai/models/test.py CHANGED
@@ -24,6 +24,7 @@ from ..messages import (
     ToolCallPart,
     ToolReturnPart,
 )
+from ..profiles import ModelProfileSpec
 from ..settings import ModelSettings
 from ..tools import ToolDefinition
 from ..usage import Usage
@@ -45,7 +46,7 @@ class _WrappedToolOutput:
     value: Any | None
 
 
-@dataclass
+@dataclass(init=False)
 class TestModel(Model):
     """A model specifically for testing purposes.
 
@@ -79,6 +80,26 @@ class TestModel(Model):
     _model_name: str = field(default='test', repr=False)
     _system: str = field(default='test', repr=False)
 
+    def __init__(
+        self,
+        *,
+        call_tools: list[str] | Literal['all'] = 'all',
+        custom_output_text: str | None = None,
+        custom_output_args: Any | None = None,
+        seed: int = 0,
+        profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
+    ):
+        """Initialize TestModel with optional settings and profile."""
+        self.call_tools = call_tools
+        self.custom_output_text = custom_output_text
+        self.custom_output_args = custom_output_args
+        self.seed = seed
+        self.last_model_request_parameters = None
+        self._model_name = 'test'
+        self._system = 'test'
+        super().__init__(settings=settings, profile=profile)
+
     async def request(
         self,
         messages: list[ModelMessage],
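Because `TestModel` is now `@dataclass(init=False)` with an explicit keyword-only `__init__`, it accepts `settings` and `profile` like the other models. A sketch:

    from pydantic_ai import Agent
    from pydantic_ai.models.test import TestModel
    from pydantic_ai.settings import ModelSettings

    model = TestModel(custom_output_text='canned reply', settings=ModelSettings(temperature=0.0))
    agent = Agent(model)
    print(agent.run_sync('anything').output)
    #> canned reply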
pydantic_ai/models/wrapper.py CHANGED
@@ -23,6 +23,7 @@ class WrapperModel(Model):
     """The underlying model being wrapped."""
 
     def __init__(self, wrapped: Model | KnownModelName):
+        super().__init__()
         self.wrapped = infer_model(wrapped)
 
     async def request(self, *args: Any, **kwargs: Any) -> ModelResponse:
@@ -53,5 +54,10 @@ class WrapperModel(Model):
     def profile(self) -> ModelProfile:
         return self.wrapped.profile
 
+    @property
+    def settings(self) -> ModelSettings | None:
+        """Get the settings from the wrapped model."""
+        return self.wrapped.settings
+
     def __getattr__(self, item: str):
         return getattr(self.wrapped, item)  # pragma: no cover
pydantic_ai/output.py CHANGED
@@ -2,10 +2,14 @@ from __future__ import annotations
 
 from collections.abc import Awaitable, Sequence
 from dataclasses import dataclass
-from typing import Callable, Generic, Literal, Union
+from typing import Any, Callable, Generic, Literal, Union
 
+from pydantic import GetCoreSchemaHandler, GetJsonSchemaHandler
+from pydantic.json_schema import JsonSchemaValue
+from pydantic_core import core_schema
 from typing_extensions import TypeAliasType, TypeVar
 
+from . import _utils
 from .tools import RunContext
 
 __all__ = (
@@ -14,6 +18,7 @@ __all__ = (
     'NativeOutput',
     'PromptedOutput',
     'TextOutput',
+    'StructuredDict',
     # types
     'OutputDataT',
     'OutputMode',
@@ -266,6 +271,65 @@ class TextOutput(Generic[OutputDataT]):
     """The function that will be called to process the model's plain text output. The function must take a single string argument."""
 
 
+def StructuredDict(
+    json_schema: JsonSchemaValue, name: str | None = None, description: str | None = None
+) -> type[JsonSchemaValue]:
+    """Returns a `dict[str, Any]` subclass with a JSON schema attached that will be used for structured output.
+
+    Args:
+        json_schema: A JSON schema of type `object` defining the structure of the dictionary content.
+        name: Optional name of the structured output. If not provided, the `title` field of the JSON schema will be used if it's present.
+        description: Optional description of the structured output. If not provided, the `description` field of the JSON schema will be used if it's present.
+
+    Example:
+    ```python {title="structured_dict.py"}
+    from pydantic_ai import Agent, StructuredDict
+
+
+    schema = {
+        "type": "object",
+        "properties": {
+            "name": {"type": "string"},
+            "age": {"type": "integer"}
+        },
+        "required": ["name", "age"]
+    }
+
+    agent = Agent('openai:gpt-4o', output_type=StructuredDict(schema))
+    result = agent.run_sync("Create a person")
+    print(result.output)
+    #> {'name': 'John Doe', 'age': 30}
+    ```
+    """
+    json_schema = _utils.check_object_json_schema(json_schema)
+
+    if name:
+        json_schema['title'] = name
+
+    if description:
+        json_schema['description'] = description
+
+    class _StructuredDict(JsonSchemaValue):
+        __is_model_like__ = True
+
+        @classmethod
+        def __get_pydantic_core_schema__(
+            cls, source_type: Any, handler: GetCoreSchemaHandler
+        ) -> core_schema.CoreSchema:
+            return core_schema.dict_schema(
+                keys_schema=core_schema.str_schema(),
+                values_schema=core_schema.any_schema(),
+            )
+
+        @classmethod
+        def __get_pydantic_json_schema__(
+            cls, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler
+        ) -> JsonSchemaValue:
+            return json_schema
+
+    return _StructuredDict
+
+
 OutputSpec = TypeAliasType(
     'OutputSpec',
     Union[
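Beyond the docstring example, the optional `name` and `description` arguments write straight into the schema's `title` and `description` before it reaches the model, which is how the `_output.py` change above can recover a tool name from the schema title. A sketch:

    from pydantic_ai import StructuredDict

    PersonDict = StructuredDict(
        {
            'type': 'object',
            'properties': {'name': {'type': 'string'}},
            'required': ['name'],
        },
        name='Person',
        description='A person record',
    )
    # PersonDict is a dict[str, Any] subclass whose attached JSON schema now
    # has title='Person' and the given description.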
{pydantic_ai_slim-0.4.0.dist-info → pydantic_ai_slim-0.4.2.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.4.0
+Version: 0.4.2
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
 License-Expression: MIT
@@ -30,11 +30,11 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.4.0
+Requires-Dist: pydantic-graph==0.4.2
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
-Requires-Dist: fasta2a==0.4.0; extra == 'a2a'
+Requires-Dist: fasta2a>=0.4.1; extra == 'a2a'
 Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.52.0; extra == 'anthropic'
 Provides-Extra: bedrock
@@ -48,7 +48,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.4.0; extra == 'evals'
+Requires-Dist: pydantic-evals==0.4.2; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.24.0; extra == 'google'
 Provides-Extra: groq
{pydantic_ai_slim-0.4.0.dist-info → pydantic_ai_slim-0.4.2.dist-info}/RECORD RENAMED
@@ -1,25 +1,25 @@
-pydantic_ai/__init__.py,sha256=Ns04g4Efqkzwccs8w2nGphfWbptMlIJYG8vIJbGGyG0,1262
+pydantic_ai/__init__.py,sha256=h6Rll8pEzUUUX6SckosummoAFbq7ctfBlI6WSyaXR4I,1300
 pydantic_ai/__main__.py,sha256=Q_zJU15DUA01YtlJ2mnaLCoId2YmgmreVEERGuQT-Y0,132
-pydantic_ai/_a2a.py,sha256=8nNtx6GENDt2Ej3f1ui9L-FuNQBYVELpJFfwz-y7fUw,7234
+pydantic_ai/_a2a.py,sha256=PFgqW6I3qh3deY4WFfubTUroig9-NaAWxbeMxYjdtfI,12067
 pydantic_ai/_agent_graph.py,sha256=rtzyBXN4bzEDBeRkRwF031ORktSMbuGz9toZmSqUxNI,42153
 pydantic_ai/_cli.py,sha256=R-sE-9gYqPxV5-5utso4g-bzAKMiTCdo33XOVqE0ZEg,13206
 pydantic_ai/_function_schema.py,sha256=BZus5y51eqiGQKxQIcCiDoSPml3AtAb12-st_aujU2k,10813
 pydantic_ai/_griffe.py,sha256=Ugft16ZHw9CN_6-lW0Svn6jESK9zHXO_x4utkGBkbBI,5253
 pydantic_ai/_mcp.py,sha256=PuvwnlLjv7YYOa9AZJCrklevBug99zGMhwJCBGG7BHQ,5626
-pydantic_ai/_output.py,sha256=8qOx2hEwxpcoS5P8OLqOAWj94KfODDVqrPHnEIhI-90,33164
+pydantic_ai/_output.py,sha256=pZfHuMDqDilYDmS6xkjGTfsAdeFqsbefJ2tPVHWalbw,33296
 pydantic_ai/_parts_manager.py,sha256=Lioi8b7Nfyax09yQu8jTkMzxd26dYDrdAqhYvjRSKqQ,16182
 pydantic_ai/_run_context.py,sha256=zNkSyiQSH-YweO39ii3iB2taouUOodo3sTjz2Lrj4Pc,1792
 pydantic_ai/_system_prompt.py,sha256=lUSq-gDZjlYTGtd6BUm54yEvTIvgdwBmJ8mLsNZZtYU,1142
 pydantic_ai/_thinking_part.py,sha256=mzx2RZSfiQxAKpljEflrcXRXmFKxtp6bKVyorY3UYZk,1554
-pydantic_ai/_utils.py,sha256=SGXEiGCnMae1Iz_eZKUs6ni_tGMPkDaJ4W3W3YMoP5w,15545
-pydantic_ai/agent.py,sha256=Fs-bm9eeCvanwiKiD-IS_XLcMmgNWucJylXgrIDH6WM,96186
+pydantic_ai/_utils.py,sha256=9QSHZhbrCbUK18ckchC55OkBkP-1o6xhAxUkEMo9DSQ,15741
+pydantic_ai/agent.py,sha256=BCrhhRcdrt77vhakFv8iI0It8uD_yjcHwfnYiuNKAns,96409
 pydantic_ai/direct.py,sha256=WRfgke3zm-eeR39LTuh9XI2TrdHXAqO81eDvFwih4Ko,14803
 pydantic_ai/exceptions.py,sha256=IdFw594Ou7Vn4YFa7xdZ040_j_6nmyA3MPANbC7sys4,3175
 pydantic_ai/format_as_xml.py,sha256=IINfh1evWDphGahqHNLBArB5dQ4NIqS3S-kru35ztGg,372
 pydantic_ai/format_prompt.py,sha256=qdKep95Sjlr7u1-qag4JwPbjoURbG0GbeU_l5ODTNw4,4466
 pydantic_ai/mcp.py,sha256=6RvxXIn6bUlL2XWpX69i8G3atU-HLLZBgKc93dYqeVo,21830
 pydantic_ai/messages.py,sha256=ykB4jzDwPGFkgQSJagOdurBv5-DTtCaY-y9671FYz7E,39256
-pydantic_ai/output.py,sha256=gq-8H2YKgbKSTxp_HUMym57ZUkwupHyS4sCOzedlXTI,9315
+pydantic_ai/output.py,sha256=HU1dIiKyCaCvSxg8U6YMRvn1U50l0D9NMvGt_wqp_xI,11512
 pydantic_ai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/result.py,sha256=GVzXf7yjR2lKBDw9k-8PlhJgCpE3dVHiyLL0dFPvs7I,25603
 pydantic_ai/settings.py,sha256=yuUZ7-GkdPB-Gbx71kSdh8dSr6gwM9gEwk84qNxPO_I,3552
@@ -31,21 +31,21 @@ pydantic_ai/common_tools/tavily.py,sha256=Q1xxSF5HtXAaZ10Pp-OaDOHXwJf2mco9wScGEQ
 pydantic_ai/ext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/ext/aci.py,sha256=eiuWamUh90kexWyuGw_Fw2kM-EAA6Pv-IfNhf5hQ8fs,2123
 pydantic_ai/ext/langchain.py,sha256=iSyACZiJDDvxr0BKYl9dLxe4BPezCBHxgz_2Vk3W-Ak,1973
-pydantic_ai/models/__init__.py,sha256=B8vG0crUDCO3Bvd8fVeMNPzZH2Un61rEJFxSaumoUl4,29101
-pydantic_ai/models/anthropic.py,sha256=ooRh6Yh0jLj78IKjgaYTN0UbB2Ku8ZhuEBi8v8kymoE,23679
-pydantic_ai/models/bedrock.py,sha256=i8BNOFEYGiRYA4ZEFwqHzJHf3EP54akVzZHdEUJohiw,29234
-pydantic_ai/models/cohere.py,sha256=qgYegjfOsqXbRcjXCbg0jaexbuxh1SrS9_mZdzzJVbM,12623
-pydantic_ai/models/fallback.py,sha256=sTYw8wW8iGgFIPG2Oynsucb9orG6wbV_h-9k5vKil4I,5103
-pydantic_ai/models/function.py,sha256=nfCjRmbcF7sdK_nsak1fvzz9Xkptx5WhsxvWdB02zec,12113
-pydantic_ai/models/gemini.py,sha256=22qucwayi8x20yvZY6qeHH4WRyEObfIkrCQ5cluejdQ,38488
-pydantic_ai/models/google.py,sha256=PFioCPeuf5_f80s9NiRSxFZawvfYbUUhpaW7mUg8frg,24072
-pydantic_ai/models/groq.py,sha256=tmYTPKsMMhtIms_9muPKYQvGZ98b_kax7t8H1YE1vPU,18500
-pydantic_ai/models/instrumented.py,sha256=olTa7Fl2BwHLvTLT6sSrS2HOS7UyWg182Xujx8hutBw,15947
+pydantic_ai/models/__init__.py,sha256=-zXNNMR5E9bPzbxO95m4fQ2RCGotzkuZVkF4OyGKBoA,29730
+pydantic_ai/models/anthropic.py,sha256=v-Nyx0Bvp8XdNVQvOh5NW1PXM_3plcYbvHH8e2DPgUc,23826
+pydantic_ai/models/bedrock.py,sha256=LIathJMiwwbACCEwTwM4EwCMYEwvomeQ_M_SoDnzPnQ,29403
+pydantic_ai/models/cohere.py,sha256=NXk2Fvs6eSGTCuBrGfPB1ioAmDnjI3Ug_1mDc_0bZP0,12792
+pydantic_ai/models/fallback.py,sha256=URaV-dTQWkg99xrlkmknue5lXZWDcEt7cJ1Vsky4oB4,5130
+pydantic_ai/models/function.py,sha256=Qyvg7n9SMyhNVugd9T525OrbWYW8BQedy7kBRpHu48Q,12457
+pydantic_ai/models/gemini.py,sha256=U_hjUMW6R7b8Ik4m1nYf1vQIrbkzGpgkZLtlHTvc0fw,38651
+pydantic_ai/models/google.py,sha256=P8eKVf5DXwxfR11t34c1YlYTD90yYVWEj4weklQqhNk,24216
+pydantic_ai/models/groq.py,sha256=Y4cAoKHjMMkK0Ggs10LfhSJ-Wc-LPJoX-6GPLqZYprc,18669
+pydantic_ai/models/instrumented.py,sha256=pFbVRmDMb30TZdbTejQZQk3lwcFiED9n-kz7Wl23Lb8,16102
 pydantic_ai/models/mcp_sampling.py,sha256=q9nnjNEAAbhrfRc_Qw5z9TtCHMG_SwlCWW9FvKWjh8k,3395
-pydantic_ai/models/mistral.py,sha256=d_TQjSQukSztNt6JpFQCqugYTxXQ97GaQBc3zUxOSSA,30555
-pydantic_ai/models/openai.py,sha256=ReqpM4gdM0TPSwUCGu2L8VoBFsxy2Y-8PRFhI6d5KcI,53646
-pydantic_ai/models/test.py,sha256=STNd79ZoCyyphm0eFRNDoTpvkOzhw1qFw1zgv44kqsg,17441
-pydantic_ai/models/wrapper.py,sha256=2g06TxE5kFqfaJCwsDJHp7Rltoj0XXH0OzdpRDOcqNo,1861
+pydantic_ai/models/mistral.py,sha256=KaIpO_vx8YnhcvlRlbmZwarivQpr0Y_r4e_Fj9JdYsQ,30724
+pydantic_ai/models/openai.py,sha256=bjLk7CH_YXaETsfhNOWbrtn6qvuxFqtyUNRd094gS9s,54272
+pydantic_ai/models/test.py,sha256=S8hp3fJZJVSwWl01bBi-q7YpijdbstXhGg3aCPon98o,18227
+pydantic_ai/models/wrapper.py,sha256=A5-ncYhPF8c9S_czGoXkd55s2KOQb65p3jbVpwZiFPA,2043
 pydantic_ai/profiles/__init__.py,sha256=BXMqUpgRfosmYgcxjKAI9ESCj47JTSa30DhKXEgVLzM,2419
 pydantic_ai/profiles/_json_schema.py,sha256=sTNHkaK0kbwmbldZp9JRGQNax0f5Qvwy0HkWuu_nGxU,7179
 pydantic_ai/profiles/amazon.py,sha256=O4ijm1Lpz01vaSiHrkSeGQhbCKV5lyQVtHYqh0pCW_k,339
@@ -76,8 +76,8 @@ pydantic_ai/providers/mistral.py,sha256=EIUSENjFuGzBhvbdrarUTM4VPkesIMnZrzfnEKHO
 pydantic_ai/providers/openai.py,sha256=7iGij0EaFylab7dTZAZDgXr78tr-HsZrn9EI9AkWBNQ,3091
 pydantic_ai/providers/openrouter.py,sha256=NXjNdnlXIBrBMMqbzcWQnowXOuZh4NHikXenBn5h3mc,4061
 pydantic_ai/providers/together.py,sha256=zFVSMSm5jXbpkNouvBOTjWrPmlPpCp6sQS5LMSyVjrQ,3482
-pydantic_ai_slim-0.4.0.dist-info/METADATA,sha256=S-ygqOZ0lpsazK_VGyrj8B6l1H9Q7B2bCGYRtmmK4T8,3846
-pydantic_ai_slim-0.4.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-pydantic_ai_slim-0.4.0.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
-pydantic_ai_slim-0.4.0.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
-pydantic_ai_slim-0.4.0.dist-info/RECORD,,
+pydantic_ai_slim-0.4.2.dist-info/METADATA,sha256=tUdKEmWmW4dGY2tCE0SH1dsEUwwlHdzzuenxatbWDhY,3846
+pydantic_ai_slim-0.4.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pydantic_ai_slim-0.4.2.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
+pydantic_ai_slim-0.4.2.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
+pydantic_ai_slim-0.4.2.dist-info/RECORD,,