pydantic-ai-slim 1.0.12__py3-none-any.whl → 1.0.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of pydantic-ai-slim has been flagged as potentially problematic.
Files changed (33)
  1. pydantic_ai/_agent_graph.py +6 -4
  2. pydantic_ai/_instrumentation.py +95 -0
  3. pydantic_ai/_output.py +26 -12
  4. pydantic_ai/_run_context.py +4 -0
  5. pydantic_ai/_tool_manager.py +15 -7
  6. pydantic_ai/agent/__init__.py +67 -34
  7. pydantic_ai/agent/abstract.py +12 -1
  8. pydantic_ai/agent/wrapper.py +11 -3
  9. pydantic_ai/direct.py +2 -2
  10. pydantic_ai/durable_exec/dbos/_agent.py +11 -2
  11. pydantic_ai/durable_exec/temporal/_agent.py +12 -3
  12. pydantic_ai/mcp.py +12 -2
  13. pydantic_ai/models/__init__.py +18 -1
  14. pydantic_ai/models/anthropic.py +8 -0
  15. pydantic_ai/models/bedrock.py +8 -0
  16. pydantic_ai/models/cohere.py +4 -0
  17. pydantic_ai/models/fallback.py +2 -9
  18. pydantic_ai/models/function.py +8 -0
  19. pydantic_ai/models/gemini.py +8 -0
  20. pydantic_ai/models/google.py +12 -0
  21. pydantic_ai/models/groq.py +8 -0
  22. pydantic_ai/models/huggingface.py +8 -2
  23. pydantic_ai/models/instrumented.py +16 -6
  24. pydantic_ai/models/mcp_sampling.py +2 -0
  25. pydantic_ai/models/mistral.py +8 -0
  26. pydantic_ai/models/openai.py +16 -1
  27. pydantic_ai/models/test.py +8 -0
  28. pydantic_ai/models/wrapper.py +7 -0
  29. {pydantic_ai_slim-1.0.12.dist-info → pydantic_ai_slim-1.0.14.dist-info}/METADATA +3 -3
  30. {pydantic_ai_slim-1.0.12.dist-info → pydantic_ai_slim-1.0.14.dist-info}/RECORD +33 -32
  31. {pydantic_ai_slim-1.0.12.dist-info → pydantic_ai_slim-1.0.14.dist-info}/WHEEL +0 -0
  32. {pydantic_ai_slim-1.0.12.dist-info → pydantic_ai_slim-1.0.14.dist-info}/entry_points.txt +0 -0
  33. {pydantic_ai_slim-1.0.12.dist-info → pydantic_ai_slim-1.0.14.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/durable_exec/temporal/_agent.py CHANGED
@@ -22,7 +22,8 @@ from pydantic_ai import (
     models,
     usage as _usage,
 )
-from pydantic_ai.agent import AbstractAgent, AgentRun, AgentRunResult, EventStreamHandler, RunOutputDataT, WrapperAgent
+from pydantic_ai.agent import AbstractAgent, AgentRun, AgentRunResult, EventStreamHandler, WrapperAgent
+from pydantic_ai.agent.abstract import Instructions, RunOutputDataT
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import Model
 from pydantic_ai.output import OutputDataT, OutputSpec
@@ -748,8 +749,9 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
         model: models.Model | models.KnownModelName | str | _utils.Unset = _utils.UNSET,
         toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET,
         tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET,
+        instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET,
     ) -> Iterator[None]:
-        """Context manager to temporarily override agent dependencies, model, toolsets, or tools.
+        """Context manager to temporarily override agent dependencies, model, toolsets, tools, or instructions.

        This is particularly useful when testing.
        You can find an example of this [here](../testing.md#overriding-model-via-pytest-fixtures).
@@ -759,6 +761,7 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
            model: The model to use instead of the model passed to the agent run.
            toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run.
            tools: The tools to use instead of the tools registered with the agent.
+           instructions: The instructions to use instead of the instructions registered with the agent.
        """
        if workflow.in_workflow():
            if _utils.is_set(model):
@@ -774,5 +777,11 @@ class TemporalAgent(WrapperAgent[AgentDepsT, OutputDataT]):
                    'Tools cannot be contextually overridden inside a Temporal workflow, they must be set at agent creation time.'
                )

-        with super().override(deps=deps, model=model, toolsets=toolsets, tools=tools):
+        with super().override(
+            deps=deps,
+            model=model,
+            toolsets=toolsets,
+            tools=tools,
+            instructions=instructions,
+        ):
            yield
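
The `super().override(...)` call above implies the base `AbstractAgent.override` gained the same `instructions` parameter in this release (consistent with the `agent/abstract.py` change in the file list). A minimal sketch of how that might look in a test, under that assumption:

from pydantic_ai import Agent
from pydantic_ai.models.test import TestModel

agent = Agent('openai:gpt-4o', instructions='Answer at length.')

def test_terse_instructions():
    # Temporarily swap out both the model and the instructions for this test.
    with agent.override(model=TestModel(), instructions='Answer tersely.'):
        result = agent.run_sync('What is 2 + 2?')
    assert result.output
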
pydantic_ai/mcp.py CHANGED
@@ -112,6 +112,7 @@ class MCPServer(AbstractToolset[Any], ABC):
     _client: ClientSession
     _read_stream: MemoryObjectReceiveStream[SessionMessage | Exception]
     _write_stream: MemoryObjectSendStream[SessionMessage]
+    _server_info: mcp_types.Implementation

     def __init__(
         self,
@@ -177,6 +178,15 @@ class MCPServer(AbstractToolset[Any], ABC):
     def tool_name_conflict_hint(self) -> str:
         return 'Set the `tool_prefix` attribute to avoid name conflicts.'

+    @property
+    def server_info(self) -> mcp_types.Implementation:
+        """Access the information sent by the MCP server during initialization."""
+        if getattr(self, '_server_info', None) is None:
+            raise AttributeError(
+                f'The `{self.__class__.__name__}.server_info` is only instantiated after initialization.'
+            )
+        return self._server_info
+
     async def list_tools(self) -> list[mcp_types.Tool]:
         """Retrieve tools that are currently active on the server.

@@ -312,8 +322,8 @@ class MCPServer(AbstractToolset[Any], ABC):
             self._client = await exit_stack.enter_async_context(client)

             with anyio.fail_after(self.timeout):
-                await self._client.initialize()
-
+                result = await self._client.initialize()
+                self._server_info = result.serverInfo
                 if log_level := self.log_level:
                     await self._client.set_logging_level(log_level)

pydantic_ai/models/__init__.py CHANGED
@@ -41,7 +41,7 @@ from ..messages import (
 )
 from ..output import OutputMode
 from ..profiles import DEFAULT_PROFILE, ModelProfile, ModelProfileSpec
-from ..settings import ModelSettings
+from ..settings import ModelSettings, merge_model_settings
 from ..tools import ToolDefinition
 from ..usage import RequestUsage

@@ -390,6 +390,23 @@ class Model(ABC):

         return model_request_parameters

+    def prepare_request(
+        self,
+        model_settings: ModelSettings | None,
+        model_request_parameters: ModelRequestParameters,
+    ) -> tuple[ModelSettings | None, ModelRequestParameters]:
+        """Prepare request inputs before they are passed to the provider.
+
+        This merges the given ``model_settings`` with the model's own ``settings`` attribute and ensures
+        ``customize_request_parameters`` is applied to the resolved
+        [`ModelRequestParameters`][pydantic_ai.models.ModelRequestParameters]. Subclasses can override this method if
+        they need to customize the preparation flow further, but most implementations should simply call
+        ``self.prepare_request(...)`` at the start of their ``request`` (and related) methods.
+        """
+        merged_settings = merge_model_settings(self.settings, model_settings)
+        customized_parameters = self.customize_request_parameters(model_request_parameters)
+        return merged_settings, customized_parameters
+
     @property
     @abstractmethod
     def model_name(self) -> str:
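
Following that docstring, a third-party `Model` subclass would call `prepare_request` first, just as the built-in models below now do. A minimal sketch; `EchoModel` is purely illustrative:

from pydantic_ai.messages import ModelMessage, ModelResponse, TextPart
from pydantic_ai.models import Model, ModelRequestParameters
from pydantic_ai.settings import ModelSettings

class EchoModel(Model):
    @property
    def model_name(self) -> str:
        return 'echo'

    @property
    def system(self) -> str:
        return 'example'

    async def request(
        self,
        messages: list[ModelMessage],
        model_settings: ModelSettings | None,
        model_request_parameters: ModelRequestParameters,
    ) -> ModelResponse:
        # Merge this model's own `settings` and apply customize_request_parameters
        # before doing any provider-specific work.
        model_settings, model_request_parameters = self.prepare_request(
            model_settings, model_request_parameters
        )
        return ModelResponse(parts=[TextPart(content='echo')])
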
pydantic_ai/models/anthropic.py CHANGED
@@ -205,6 +205,10 @@ class AnthropicModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._messages_create(
             messages, False, cast(AnthropicModelSettings, model_settings or {}), model_request_parameters
         )
@@ -220,6 +224,10 @@ class AnthropicModel(Model):
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._messages_create(
             messages, True, cast(AnthropicModelSettings, model_settings or {}), model_request_parameters
         )
pydantic_ai/models/bedrock.py CHANGED
@@ -264,6 +264,10 @@ class BedrockConverseModel(Model):
         model_settings: ModelSettings | None,
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         settings = cast(BedrockModelSettings, model_settings or {})
         response = await self._messages_create(messages, False, settings, model_request_parameters)
         model_response = await self._process_response(response)
@@ -277,6 +281,10 @@ class BedrockConverseModel(Model):
         model_request_parameters: ModelRequestParameters,
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         settings = cast(BedrockModelSettings, model_settings or {})
         response = await self._messages_create(messages, True, settings, model_request_parameters)
         yield BedrockStreamedResponse(
pydantic_ai/models/cohere.py CHANGED
@@ -165,6 +165,10 @@ class CohereModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._chat(messages, cast(CohereModelSettings, model_settings or {}), model_request_parameters)
         model_response = self._process_response(response)
         return model_response
pydantic_ai/models/fallback.py CHANGED
@@ -11,7 +11,6 @@ from pydantic_ai._run_context import RunContext
 from pydantic_ai.models.instrumented import InstrumentedModel

 from ..exceptions import FallbackExceptionGroup, ModelHTTPError
-from ..settings import merge_model_settings
 from . import KnownModelName, Model, ModelRequestParameters, StreamedResponse, infer_model

 if TYPE_CHECKING:
@@ -78,10 +77,8 @@ class FallbackModel(Model):
         exceptions: list[Exception] = []

         for model in self.models:
-            customized_model_request_parameters = model.customize_request_parameters(model_request_parameters)
-            merged_settings = merge_model_settings(model.settings, model_settings)
             try:
-                response = await model.request(messages, merged_settings, customized_model_request_parameters)
+                response = await model.request(messages, model_settings, model_request_parameters)
             except Exception as exc:
                 if self._fallback_on(exc):
                     exceptions.append(exc)
@@ -105,14 +102,10 @@ class FallbackModel(Model):
         exceptions: list[Exception] = []

         for model in self.models:
-            customized_model_request_parameters = model.customize_request_parameters(model_request_parameters)
-            merged_settings = merge_model_settings(model.settings, model_settings)
             async with AsyncExitStack() as stack:
                 try:
                     response = await stack.enter_async_context(
-                        model.request_stream(
-                            messages, merged_settings, customized_model_request_parameters, run_context
-                        )
+                        model.request_stream(messages, model_settings, model_request_parameters, run_context)
                     )
                 except Exception as exc:
                     if self._fallback_on(exc):
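
With `prepare_request` in place, each wrapped model merges its own `settings` and customizes its own parameters inside `request()`/`request_stream()`, so `FallbackModel` can pass the caller's inputs through untouched. A hedged sketch of the unchanged user-facing behavior; the model names are illustrative:

from pydantic_ai.models.anthropic import AnthropicModel
from pydantic_ai.models.fallback import FallbackModel
from pydantic_ai.models.openai import OpenAIChatModel

primary = OpenAIChatModel('gpt-4o', settings={'temperature': 0.2})
backup = AnthropicModel('claude-sonnet-4-0')
model = FallbackModel(primary, backup)
# On each attempt, the selected model now applies its own settings merge via
# prepare_request(), which previously happened inline in FallbackModel.
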
pydantic_ai/models/function.py CHANGED
@@ -125,6 +125,10 @@ class FunctionModel(Model):
         model_settings: ModelSettings | None,
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         agent_info = AgentInfo(
             function_tools=model_request_parameters.function_tools,
             allow_text_output=model_request_parameters.allow_text_output,
@@ -154,6 +158,10 @@ class FunctionModel(Model):
         model_request_parameters: ModelRequestParameters,
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         agent_info = AgentInfo(
             function_tools=model_request_parameters.function_tools,
             allow_text_output=model_request_parameters.allow_text_output,
pydantic_ai/models/gemini.py CHANGED
@@ -155,6 +155,10 @@ class GeminiModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         async with self._make_request(
             messages, False, cast(GeminiModelSettings, model_settings or {}), model_request_parameters
         ) as http_response:
@@ -171,6 +175,10 @@ class GeminiModel(Model):
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         async with self._make_request(
             messages, True, cast(GeminiModelSettings, model_settings or {}), model_request_parameters
         ) as http_response:
pydantic_ai/models/google.py CHANGED
@@ -225,6 +225,10 @@ class GoogleModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         model_settings = cast(GoogleModelSettings, model_settings or {})
         response = await self._generate_content(messages, False, model_settings, model_request_parameters)
         return self._process_response(response)
@@ -236,6 +240,10 @@ class GoogleModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> usage.RequestUsage:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         model_settings = cast(GoogleModelSettings, model_settings or {})
         contents, generation_config = await self._build_content_and_config(
             messages, model_settings, model_request_parameters
@@ -291,6 +299,10 @@ class GoogleModel(Model):
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         model_settings = cast(GoogleModelSettings, model_settings or {})
         response = await self._generate_content(messages, True, model_settings, model_request_parameters)
         yield await self._process_streamed_response(response, model_request_parameters)  # type: ignore
pydantic_ai/models/groq.py CHANGED
@@ -182,6 +182,10 @@ class GroqModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         try:
             response = await self._completions_create(
                 messages, False, cast(GroqModelSettings, model_settings or {}), model_request_parameters
@@ -218,6 +222,10 @@ class GroqModel(Model):
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._completions_create(
             messages, True, cast(GroqModelSettings, model_settings or {}), model_request_parameters
         )
pydantic_ai/models/huggingface.py CHANGED
@@ -166,6 +166,10 @@ class HuggingFaceModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._completions_create(
             messages, False, cast(HuggingFaceModelSettings, model_settings or {}), model_request_parameters
         )
@@ -181,6 +185,10 @@ class HuggingFaceModel(Model):
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._completions_create(
             messages, True, cast(HuggingFaceModelSettings, model_settings or {}), model_request_parameters
         )
@@ -377,8 +385,6 @@ class HuggingFaceModel(Model):
                 },
             }
         )
-        if f.strict is not None:
-            tool_param['function']['strict'] = f.strict
         return tool_param

     async def _map_user_message(
pydantic_ai/models/instrumented.py CHANGED
@@ -21,6 +21,8 @@ from opentelemetry.trace import Span, Tracer, TracerProvider, get_tracer_provider
 from opentelemetry.util.types import AttributeValue
 from pydantic import TypeAdapter

+from pydantic_ai._instrumentation import DEFAULT_INSTRUMENTATION_VERSION
+
 from .. import _otel_messages
 from .._run_context import RunContext
 from ..messages import (
@@ -90,7 +92,7 @@ class InstrumentationSettings:
     event_mode: Literal['attributes', 'logs'] = 'attributes'
     include_binary_content: bool = True
     include_content: bool = True
-    version: Literal[1, 2] = 1
+    version: Literal[1, 2, 3] = DEFAULT_INSTRUMENTATION_VERSION

     def __init__(
         self,
@@ -99,7 +101,7 @@ class InstrumentationSettings:
         meter_provider: MeterProvider | None = None,
         include_binary_content: bool = True,
         include_content: bool = True,
-        version: Literal[1, 2] = 2,
+        version: Literal[1, 2, 3] = DEFAULT_INSTRUMENTATION_VERSION,
         event_mode: Literal['attributes', 'logs'] = 'attributes',
         event_logger_provider: EventLoggerProvider | None = None,
     ):
@@ -352,8 +354,12 @@ class InstrumentedModel(WrapperModel):
         model_settings: ModelSettings | None,
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
-        with self._instrument(messages, model_settings, model_request_parameters) as finish:
-            response = await super().request(messages, model_settings, model_request_parameters)
+        prepared_settings, prepared_parameters = self.wrapped.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
+        with self._instrument(messages, prepared_settings, prepared_parameters) as finish:
+            response = await self.wrapped.request(messages, model_settings, model_request_parameters)
             finish(response)
             return response

@@ -365,10 +371,14 @@ class InstrumentedModel(WrapperModel):
         model_request_parameters: ModelRequestParameters,
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
-        with self._instrument(messages, model_settings, model_request_parameters) as finish:
+        prepared_settings, prepared_parameters = self.wrapped.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
+        with self._instrument(messages, prepared_settings, prepared_parameters) as finish:
            response_stream: StreamedResponse | None = None
            try:
-                async with super().request_stream(
+                async with self.wrapped.request_stream(
                     messages, model_settings, model_request_parameters, run_context
                 ) as response_stream:
                     yield response_stream
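
Per the diff, `version` now accepts `3` and defaults to `DEFAULT_INSTRUMENTATION_VERSION` from the new `pydantic_ai/_instrumentation.py` module. A minimal sketch of pinning it explicitly rather than relying on the default:

from pydantic_ai import Agent
from pydantic_ai.models.instrumented import InstrumentationSettings

# Pin the instrumentation semantic-convention version for this agent.
agent = Agent('openai:gpt-4o', instrument=InstrumentationSettings(version=3))
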
pydantic_ai/models/mcp_sampling.py CHANGED
@@ -52,6 +52,8 @@ class MCPSamplingModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
         system_prompt, sampling_messages = _mcp.map_from_pai_messages(messages)
+
+        model_settings, _ = self.prepare_request(model_settings, model_request_parameters)
         model_settings = cast(MCPSamplingModelSettings, model_settings or {})

         result = await self.session.create_message(
pydantic_ai/models/mistral.py CHANGED
@@ -185,6 +185,10 @@ class MistralModel(Model):
     ) -> ModelResponse:
         """Make a non-streaming request to the model from Pydantic AI call."""
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._completions_create(
             messages, cast(MistralModelSettings, model_settings or {}), model_request_parameters
         )
@@ -201,6 +205,10 @@ class MistralModel(Model):
     ) -> AsyncIterator[StreamedResponse]:
         """Make a streaming request to the model from Pydantic AI call."""
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._stream_completions_create(
             messages, cast(MistralModelSettings, model_settings or {}), model_request_parameters
         )
pydantic_ai/models/openai.py CHANGED
@@ -393,6 +393,10 @@ class OpenAIChatModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._completions_create(
             messages, False, cast(OpenAIChatModelSettings, model_settings or {}), model_request_parameters
         )
@@ -408,6 +412,10 @@ class OpenAIChatModel(Model):
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._completions_create(
             messages, True, cast(OpenAIChatModelSettings, model_settings or {}), model_request_parameters
         )
@@ -926,6 +934,10 @@ class OpenAIResponsesModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._responses_create(
             messages, False, cast(OpenAIResponsesModelSettings, model_settings or {}), model_request_parameters
         )
@@ -940,6 +952,10 @@ class OpenAIResponsesModel(Model):
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._responses_create(
             messages, True, cast(OpenAIResponsesModelSettings, model_settings or {}), model_request_parameters
         )
@@ -1516,7 +1532,6 @@ class OpenAIStreamedResponse(StreamedResponse):

     async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
         async for chunk in self._response:
-            print(chunk)
             self._usage += _map_usage(chunk)

             if chunk.id:  # pragma: no branch
pydantic_ai/models/test.py CHANGED
@@ -110,6 +110,10 @@ class TestModel(Model):
         model_settings: ModelSettings | None,
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         self.last_model_request_parameters = model_request_parameters
         model_response = self._request(messages, model_settings, model_request_parameters)
         model_response.usage = _estimate_usage([*messages, model_response])
@@ -123,6 +127,10 @@ class TestModel(Model):
         model_request_parameters: ModelRequestParameters,
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         self.last_model_request_parameters = model_request_parameters

         model_response = self._request(messages, model_settings, model_request_parameters)
pydantic_ai/models/wrapper.py CHANGED
@@ -46,6 +46,13 @@ class WrapperModel(Model):
     def customize_request_parameters(self, model_request_parameters: ModelRequestParameters) -> ModelRequestParameters:
         return self.wrapped.customize_request_parameters(model_request_parameters)

+    def prepare_request(
+        self,
+        model_settings: ModelSettings | None,
+        model_request_parameters: ModelRequestParameters,
+    ) -> tuple[ModelSettings | None, ModelRequestParameters]:
+        return self.wrapped.prepare_request(model_settings, model_request_parameters)
+
     @property
     def model_name(self) -> str:
         return self.wrapped.model_name
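
Because the delegation is explicit, wrappers like `InstrumentedModel` resolve settings against the innermost model's own `settings` attribute. A small sketch, assuming `WrapperModel` is directly instantiable:

from pydantic_ai.models import ModelRequestParameters
from pydantic_ai.models.test import TestModel
from pydantic_ai.models.wrapper import WrapperModel

wrapper = WrapperModel(TestModel())
# Delegates to TestModel.prepare_request, merging the wrapped model's own
# settings with the per-request settings passed here.
settings, params = wrapper.prepare_request({'temperature': 0.0}, ModelRequestParameters())
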
{pydantic_ai_slim-1.0.12.dist-info → pydantic_ai_slim-1.0.14.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 1.0.12
+Version: 1.0.14
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -33,7 +33,7 @@ Requires-Dist: genai-prices>=0.0.28
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==1.0.12
+Requires-Dist: pydantic-graph==1.0.14
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -57,7 +57,7 @@ Requires-Dist: dbos>=1.14.0; extra == 'dbos'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==1.0.12; extra == 'evals'
+Requires-Dist: pydantic-evals==1.0.14; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq