pydantic-ai-slim 1.0.11__py3-none-any.whl → 1.0.13__py3-none-any.whl

This diff compares the contents of two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of pydantic-ai-slim was flagged as possibly problematic by the registry.

Files changed (80)
  1. pydantic_ai/__init__.py +134 -4
  2. pydantic_ai/_a2a.py +1 -1
  3. pydantic_ai/_agent_graph.py +4 -0
  4. pydantic_ai/_instrumentation.py +95 -0
  5. pydantic_ai/{profiles/_json_schema.py → _json_schema.py} +5 -3
  6. pydantic_ai/_output.py +26 -12
  7. pydantic_ai/_run_context.py +4 -0
  8. pydantic_ai/_thinking_part.py +1 -1
  9. pydantic_ai/_tool_manager.py +15 -7
  10. pydantic_ai/_utils.py +24 -7
  11. pydantic_ai/agent/__init__.py +68 -36
  12. pydantic_ai/agent/abstract.py +12 -1
  13. pydantic_ai/agent/wrapper.py +11 -3
  14. pydantic_ai/builtin_tools.py +20 -1
  15. pydantic_ai/common_tools/duckduckgo.py +2 -2
  16. pydantic_ai/common_tools/tavily.py +2 -2
  17. pydantic_ai/direct.py +6 -6
  18. pydantic_ai/durable_exec/dbos/_agent.py +12 -3
  19. pydantic_ai/durable_exec/dbos/_mcp_server.py +1 -2
  20. pydantic_ai/durable_exec/dbos/_model.py +2 -2
  21. pydantic_ai/durable_exec/temporal/_agent.py +13 -4
  22. pydantic_ai/durable_exec/temporal/_function_toolset.py +1 -1
  23. pydantic_ai/durable_exec/temporal/_mcp_server.py +1 -1
  24. pydantic_ai/durable_exec/temporal/_model.py +3 -3
  25. pydantic_ai/durable_exec/temporal/_toolset.py +1 -3
  26. pydantic_ai/ext/aci.py +1 -1
  27. pydantic_ai/ext/langchain.py +1 -1
  28. pydantic_ai/mcp.py +32 -8
  29. pydantic_ai/messages.py +14 -11
  30. pydantic_ai/models/__init__.py +19 -2
  31. pydantic_ai/models/anthropic.py +29 -14
  32. pydantic_ai/models/bedrock.py +14 -5
  33. pydantic_ai/models/cohere.py +4 -0
  34. pydantic_ai/models/fallback.py +2 -9
  35. pydantic_ai/models/function.py +8 -0
  36. pydantic_ai/models/gemini.py +8 -0
  37. pydantic_ai/models/google.py +14 -2
  38. pydantic_ai/models/groq.py +8 -0
  39. pydantic_ai/models/huggingface.py +8 -2
  40. pydantic_ai/models/instrumented.py +16 -6
  41. pydantic_ai/models/mcp_sampling.py +2 -0
  42. pydantic_ai/models/mistral.py +8 -0
  43. pydantic_ai/models/openai.py +95 -29
  44. pydantic_ai/models/test.py +8 -0
  45. pydantic_ai/models/wrapper.py +7 -0
  46. pydantic_ai/output.py +11 -1
  47. pydantic_ai/profiles/__init__.py +1 -1
  48. pydantic_ai/profiles/google.py +1 -1
  49. pydantic_ai/profiles/openai.py +1 -1
  50. pydantic_ai/providers/__init__.py +1 -1
  51. pydantic_ai/providers/anthropic.py +1 -1
  52. pydantic_ai/providers/azure.py +1 -1
  53. pydantic_ai/providers/bedrock.py +1 -1
  54. pydantic_ai/providers/cerebras.py +1 -1
  55. pydantic_ai/providers/cohere.py +1 -1
  56. pydantic_ai/providers/deepseek.py +1 -1
  57. pydantic_ai/providers/fireworks.py +1 -1
  58. pydantic_ai/providers/github.py +1 -1
  59. pydantic_ai/providers/google.py +1 -1
  60. pydantic_ai/providers/google_gla.py +1 -1
  61. pydantic_ai/providers/google_vertex.py +1 -1
  62. pydantic_ai/providers/grok.py +1 -1
  63. pydantic_ai/providers/groq.py +1 -1
  64. pydantic_ai/providers/heroku.py +1 -1
  65. pydantic_ai/providers/huggingface.py +1 -1
  66. pydantic_ai/providers/litellm.py +1 -1
  67. pydantic_ai/providers/mistral.py +1 -1
  68. pydantic_ai/providers/moonshotai.py +1 -1
  69. pydantic_ai/providers/ollama.py +1 -1
  70. pydantic_ai/providers/openai.py +1 -1
  71. pydantic_ai/providers/openrouter.py +1 -1
  72. pydantic_ai/providers/together.py +1 -1
  73. pydantic_ai/providers/vercel.py +1 -1
  74. pydantic_ai/toolsets/function.py +1 -2
  75. {pydantic_ai_slim-1.0.11.dist-info → pydantic_ai_slim-1.0.13.dist-info}/METADATA +3 -3
  76. pydantic_ai_slim-1.0.13.dist-info/RECORD +128 -0
  77. pydantic_ai_slim-1.0.11.dist-info/RECORD +0 -127
  78. {pydantic_ai_slim-1.0.11.dist-info → pydantic_ai_slim-1.0.13.dist-info}/WHEEL +0 -0
  79. {pydantic_ai_slim-1.0.11.dist-info → pydantic_ai_slim-1.0.13.dist-info}/entry_points.txt +0 -0
  80. {pydantic_ai_slim-1.0.11.dist-info → pydantic_ai_slim-1.0.13.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/mcp.py CHANGED
@@ -112,6 +112,7 @@ class MCPServer(AbstractToolset[Any], ABC):
     _client: ClientSession
     _read_stream: MemoryObjectReceiveStream[SessionMessage | Exception]
     _write_stream: MemoryObjectSendStream[SessionMessage]
+    _server_info: mcp_types.Implementation
 
     def __init__(
         self,
@@ -177,6 +178,15 @@ class MCPServer(AbstractToolset[Any], ABC):
     def tool_name_conflict_hint(self) -> str:
         return 'Set the `tool_prefix` attribute to avoid name conflicts.'
 
+    @property
+    def server_info(self) -> mcp_types.Implementation:
+        """Access the information sent by the MCP server during initialization."""
+        if getattr(self, '_server_info', None) is None:
+            raise AttributeError(
+                f'`{self.__class__.__name__}.server_info` is only available after initialization.'
+            )
+        return self._server_info
+
     async def list_tools(self) -> list[mcp_types.Tool]:
         """Retrieve tools that are currently active on the server.
 
@@ -225,13 +235,27 @@ class MCPServer(AbstractToolset[Any], ABC):
         except McpError as e:
             raise exceptions.ModelRetry(e.error.message)
 
-        content = [await self._map_tool_result_part(part) for part in result.content]
-
         if result.isError:
-            text = '\n'.join(str(part) for part in content)
-            raise exceptions.ModelRetry(text)
-        else:
-            return content[0] if len(content) == 1 else content
+            message: str | None = None
+            if result.content:  # pragma: no branch
+                text_parts = [part.text for part in result.content if isinstance(part, mcp_types.TextContent)]
+                message = '\n'.join(text_parts)
+
+            raise exceptions.ModelRetry(message or 'MCP tool call failed')
+
+        # Prefer structured content if there are only text parts, which per the docs would contain the JSON-encoded structured content for backward compatibility.
+        # See https://github.com/modelcontextprotocol/python-sdk#structured-output
+        if (structured := result.structuredContent) and not any(
+            not isinstance(part, mcp_types.TextContent) for part in result.content
+        ):
+            # The MCP SDK wraps primitives and generic types like list in a `result` key, but we want to use the raw value returned by the tool function.
+            # See https://github.com/modelcontextprotocol/python-sdk#structured-output
+            if isinstance(structured, dict) and len(structured) == 1 and 'result' in structured:
+                return structured['result']
+            return structured
+
+        mapped = [await self._map_tool_result_part(part) for part in result.content]
+        return mapped[0] if len(mapped) == 1 else mapped
 
     async def call_tool(
         self,
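
The `result`-key unwrapping described in the comments above can be shown standalone; this is an illustrative sketch, not code from the package:

    def unwrap_structured(structured: object) -> object:
        # Mirrors the new rule: the MCP SDK wraps primitives and generic
        # types like list in a single 'result' key; dicts pass through.
        if isinstance(structured, dict) and len(structured) == 1 and 'result' in structured:
            return structured['result']
        return structured

    assert unwrap_structured({'result': 42}) == 42
    assert unwrap_structured({'result': [1, 2]}) == [1, 2]
    assert unwrap_structured({'temperature': 21.5}) == {'temperature': 21.5}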
@@ -298,8 +322,8 @@ class MCPServer(AbstractToolset[Any], ABC):
         self._client = await exit_stack.enter_async_context(client)
 
         with anyio.fail_after(self.timeout):
-            await self._client.initialize()
-
+            result = await self._client.initialize()
+            self._server_info = result.serverInfo
             if log_level := self.log_level:
                 await self._client.set_logging_level(log_level)
 
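With initialization now storing `result.serverInfo`, the new `server_info` property can be read once the connection is up. A minimal usage sketch (the stdio command and script name are hypothetical):

    from pydantic_ai.mcp import MCPServerStdio

    server = MCPServerStdio('python', args=['my_mcp_server.py'])  # hypothetical server script

    async def main():
        async with server:  # initialize() runs here and stores serverInfo
            info = server.server_info
            print(info.name, info.version)
        # Outside the context manager, accessing server.server_info raises AttributeError.
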
pydantic_ai/messages.py CHANGED
@@ -114,6 +114,20 @@ class FileUrl(ABC):
 
     _: KW_ONLY
 
+    identifier: str
+    """The identifier of the file, such as a unique ID; one is generated from the URL if not explicitly set.
+
+    This identifier can be provided to the model in a message to allow it to refer to this file in a tool call argument,
+    and the tool can look up the file in question by iterating over the message history and finding the matching `FileUrl`.
+
+    This identifier is only automatically passed to the model when the `FileUrl` is returned by a tool.
+    If you're passing the `FileUrl` as a user message, it's up to you to include a separate text part with the identifier,
+    e.g. "This is file <identifier>:" preceding the `FileUrl`.
+
+    It's also included in inline-text delimiters for providers that require inlining text documents, so the model can
+    distinguish multiple files.
+    """
+
     force_download: bool = False
     """If the model supports it:
 
@@ -133,17 +147,6 @@ class FileUrl(ABC):
         compare=False, default=None
     )
 
-    identifier: str | None = None
-    """The identifier of the file, such as a unique ID. generating one from the url if not explicitly set
-
-    This identifier can be provided to the model in a message to allow it to refer to this file in a tool call argument,
-    and the tool can look up the file in question by iterating over the message history and finding the matching `FileUrl`.
-
-    This identifier is only automatically passed to the model when the `FileUrl` is returned by a tool.
-    If you're passing the `FileUrl` as a user message, it's up to you to include a separate text part with the identifier,
-    e.g. "This is file <identifier>:" preceding the `FileUrl`.
-    """
-
     def __init__(
         self,
         url: str,
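
The docstring above says a tool can look the file up by iterating over the message history; a minimal sketch of such a tool, assuming `RunContext.messages` and a `FileUrl` passed in a user prompt (the function name is illustrative):

    from pydantic_ai import RunContext
    from pydantic_ai.messages import FileUrl, ModelRequest, UserPromptPart

    def find_file(ctx: RunContext[None], identifier: str) -> FileUrl | None:
        # Walk the message history and return the FileUrl whose identifier
        # matches the one the model passed as a tool call argument.
        for message in ctx.messages:
            if not isinstance(message, ModelRequest):
                continue
            for part in message.parts:
                if isinstance(part, UserPromptPart) and not isinstance(part.content, str):
                    for item in part.content:
                        if isinstance(item, FileUrl) and item.identifier == identifier:
                            return item
        return None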
pydantic_ai/models/__init__.py CHANGED
@@ -20,6 +20,7 @@ import httpx
 from typing_extensions import TypeAliasType, TypedDict
 
 from .. import _utils
+from .._json_schema import JsonSchemaTransformer
 from .._output import OutputObjectDefinition
 from .._parts_manager import ModelResponsePartsManager
 from .._run_context import RunContext
@@ -40,8 +41,7 @@ from ..messages import (
 )
 from ..output import OutputMode
 from ..profiles import DEFAULT_PROFILE, ModelProfile, ModelProfileSpec
-from ..profiles._json_schema import JsonSchemaTransformer
-from ..settings import ModelSettings
+from ..settings import ModelSettings, merge_model_settings
 from ..tools import ToolDefinition
 from ..usage import RequestUsage
 
@@ -390,6 +390,23 @@ class Model(ABC):
 
         return model_request_parameters
 
+    def prepare_request(
+        self,
+        model_settings: ModelSettings | None,
+        model_request_parameters: ModelRequestParameters,
+    ) -> tuple[ModelSettings | None, ModelRequestParameters]:
+        """Prepare request inputs before they are passed to the provider.
+
+        This merges the given ``model_settings`` with the model's own ``settings`` attribute and ensures
+        ``customize_request_parameters`` is applied to the resolved
+        [`ModelRequestParameters`][pydantic_ai.models.ModelRequestParameters]. Subclasses can override this method if
+        they need to customize the preparation flow further, but most implementations should simply call
+        ``self.prepare_request(...)`` at the start of their ``request`` (and related) methods.
+        """
+        merged_settings = merge_model_settings(self.settings, model_settings)
+        customized_parameters = self.customize_request_parameters(model_request_parameters)
+        return merged_settings, customized_parameters
+
     @property
     @abstractmethod
     def model_name(self) -> str:
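
The docstring spells out the intended call pattern; a minimal sketch of a provider model following it (the subclass is hypothetical and its other abstract members are omitted):

    from pydantic_ai.messages import ModelMessage, ModelResponse
    from pydantic_ai.models import Model, ModelRequestParameters
    from pydantic_ai.settings import ModelSettings

    class MyProviderModel(Model):  # hypothetical subclass
        async def request(
            self,
            messages: list[ModelMessage],
            model_settings: ModelSettings | None,
            model_request_parameters: ModelRequestParameters,
        ) -> ModelResponse:
            # Merge per-request settings with self.settings and apply
            # customize_request_parameters before calling the provider.
            model_settings, model_request_parameters = self.prepare_request(
                model_settings, model_request_parameters
            )
            ...  # provider call goes here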
pydantic_ai/models/anthropic.py CHANGED
@@ -10,11 +10,10 @@ from typing import Any, Literal, cast, overload
 from pydantic import TypeAdapter
 from typing_extensions import assert_never
 
-from pydantic_ai.builtin_tools import CodeExecutionTool, WebSearchTool
-
 from .. import ModelHTTPError, UnexpectedModelBehavior, _utils, usage
 from .._run_context import RunContext
 from .._utils import guard_tool_call_id as _guard_tool_call_id
+from ..builtin_tools import CodeExecutionTool, MemoryTool, WebSearchTool
 from ..exceptions import UserError
 from ..messages import (
     BinaryContent,
@@ -68,6 +67,7 @@ try:
     BetaContentBlockParam,
     BetaImageBlockParam,
     BetaInputJSONDelta,
+    BetaMemoryTool20250818Param,
     BetaMessage,
     BetaMessageParam,
     BetaMetadataParam,
@@ -205,6 +205,10 @@ class AnthropicModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._messages_create(
             messages, False, cast(AnthropicModelSettings, model_settings or {}), model_request_parameters
         )
@@ -220,6 +224,10 @@ class AnthropicModel(Model):
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._messages_create(
             messages, True, cast(AnthropicModelSettings, model_settings or {}), model_request_parameters
         )
@@ -255,8 +263,7 @@ class AnthropicModel(Model):
     ) -> BetaMessage | AsyncStream[BetaRawMessageStreamEvent]:
         # standalone function to make it easier to override
         tools = self._get_tools(model_request_parameters)
-        builtin_tools, tool_headers = self._get_builtin_tools(model_request_parameters)
-        tools += builtin_tools
+        tools, beta_features = self._add_builtin_tools(tools, model_request_parameters)
 
         tool_choice: BetaToolChoiceParam | None
 
@@ -279,9 +286,11 @@ class AnthropicModel(Model):
 
         try:
             extra_headers = model_settings.get('extra_headers', {})
-            for k, v in tool_headers.items():
-                extra_headers.setdefault(k, v)
             extra_headers.setdefault('User-Agent', get_user_agent())
+            if beta_features:
+                if 'anthropic-beta' in extra_headers:
+                    beta_features.insert(0, extra_headers['anthropic-beta'])
+                extra_headers['anthropic-beta'] = ','.join(beta_features)
 
             return await self.client.beta.messages.create(
                 max_tokens=model_settings.get('max_tokens', 4096),
@@ -367,14 +376,13 @@ class AnthropicModel(Model):
             _provider_name=self._provider.name,
         )
 
-    def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[BetaToolParam]:
+    def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[BetaToolUnionParam]:
         return [self._map_tool_definition(r) for r in model_request_parameters.tool_defs.values()]
 
-    def _get_builtin_tools(
-        self, model_request_parameters: ModelRequestParameters
-    ) -> tuple[list[BetaToolUnionParam], dict[str, str]]:
-        tools: list[BetaToolUnionParam] = []
-        extra_headers: dict[str, str] = {}
+    def _add_builtin_tools(
+        self, tools: list[BetaToolUnionParam], model_request_parameters: ModelRequestParameters
+    ) -> tuple[list[BetaToolUnionParam], list[str]]:
+        beta_features: list[str] = []
         for tool in model_request_parameters.builtin_tools:
             if isinstance(tool, WebSearchTool):
                 user_location = UserLocation(type='approximate', **tool.user_location) if tool.user_location else None
@@ -389,13 +397,20 @@ class AnthropicModel(Model):
                     )
                 )
             elif isinstance(tool, CodeExecutionTool):  # pragma: no branch
-                extra_headers['anthropic-beta'] = 'code-execution-2025-05-22'
                 tools.append(BetaCodeExecutionTool20250522Param(name='code_execution', type='code_execution_20250522'))
+                beta_features.append('code-execution-2025-05-22')
+            elif isinstance(tool, MemoryTool):  # pragma: no branch
+                if 'memory' not in model_request_parameters.tool_defs:
+                    raise UserError("Built-in `MemoryTool` requires a 'memory' tool to be defined.")
+                # Replace the memory tool definition with the built-in memory tool
+                tools = [tool for tool in tools if tool['name'] != 'memory']
+                tools.append(BetaMemoryTool20250818Param(name='memory', type='memory_20250818'))
+                beta_features.append('context-management-2025-06-27')
             else:  # pragma: no cover
                 raise UserError(
                     f'`{tool.__class__.__name__}` is not supported by `AnthropicModel`. If it should be, please file an issue.'
                 )
-        return tools, extra_headers
+        return tools, beta_features
 
     async def _map_message(self, messages: list[ModelMessage]) -> tuple[str, list[BetaMessageParam]]:  # noqa: C901
         """Just maps a `pydantic_ai.Message` to a `anthropic.types.MessageParam`."""
pydantic_ai/models/bedrock.py CHANGED
@@ -13,10 +13,7 @@ import anyio
 import anyio.to_thread
 from typing_extensions import ParamSpec, assert_never
 
-from pydantic_ai import _utils, usage
-from pydantic_ai._run_context import RunContext
-from pydantic_ai.exceptions import UserError
-from pydantic_ai.messages import (
+from pydantic_ai import (
     AudioUrl,
     BinaryContent,
     BuiltinToolCallPart,
@@ -25,6 +22,7 @@ from pydantic_ai.messages import (
     FinishReason,
     ImageUrl,
     ModelMessage,
+    ModelProfileSpec,
     ModelRequest,
     ModelResponse,
     ModelResponsePart,
@@ -37,9 +35,12 @@ from pydantic_ai.messages import (
     ToolReturnPart,
     UserPromptPart,
     VideoUrl,
+    _utils,
+    usage,
 )
+from pydantic_ai._run_context import RunContext
+from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import Model, ModelRequestParameters, StreamedResponse, download_item
-from pydantic_ai.profiles import ModelProfileSpec
 from pydantic_ai.providers import Provider, infer_provider
 from pydantic_ai.providers.bedrock import BedrockModelProfile
 from pydantic_ai.settings import ModelSettings
@@ -263,6 +264,10 @@ class BedrockConverseModel(Model):
         model_settings: ModelSettings | None,
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         settings = cast(BedrockModelSettings, model_settings or {})
         response = await self._messages_create(messages, False, settings, model_request_parameters)
         model_response = await self._process_response(response)
@@ -276,6 +281,10 @@ class BedrockConverseModel(Model):
         model_request_parameters: ModelRequestParameters,
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         settings = cast(BedrockModelSettings, model_settings or {})
         response = await self._messages_create(messages, True, settings, model_request_parameters)
         yield BedrockStreamedResponse(
pydantic_ai/models/cohere.py CHANGED
@@ -165,6 +165,10 @@ class CohereModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._chat(messages, cast(CohereModelSettings, model_settings or {}), model_request_parameters)
         model_response = self._process_response(response)
         return model_response
pydantic_ai/models/fallback.py CHANGED
@@ -11,7 +11,6 @@ from pydantic_ai._run_context import RunContext
 from pydantic_ai.models.instrumented import InstrumentedModel
 
 from ..exceptions import FallbackExceptionGroup, ModelHTTPError
-from ..settings import merge_model_settings
 from . import KnownModelName, Model, ModelRequestParameters, StreamedResponse, infer_model
 
 if TYPE_CHECKING:
@@ -78,10 +77,8 @@ class FallbackModel(Model):
         exceptions: list[Exception] = []
 
         for model in self.models:
-            customized_model_request_parameters = model.customize_request_parameters(model_request_parameters)
-            merged_settings = merge_model_settings(model.settings, model_settings)
             try:
-                response = await model.request(messages, merged_settings, customized_model_request_parameters)
+                response = await model.request(messages, model_settings, model_request_parameters)
             except Exception as exc:
                 if self._fallback_on(exc):
                     exceptions.append(exc)
@@ -105,14 +102,10 @@ class FallbackModel(Model):
         exceptions: list[Exception] = []
 
         for model in self.models:
-            customized_model_request_parameters = model.customize_request_parameters(model_request_parameters)
-            merged_settings = merge_model_settings(model.settings, model_settings)
             async with AsyncExitStack() as stack:
                 try:
                     response = await stack.enter_async_context(
-                        model.request_stream(
-                            messages, merged_settings, customized_model_request_parameters, run_context
-                        )
+                        model.request_stream(messages, model_settings, model_request_parameters, run_context)
                     )
                 except Exception as exc:
                     if self._fallback_on(exc):
pydantic_ai/models/function.py CHANGED
@@ -125,6 +125,10 @@ class FunctionModel(Model):
         model_settings: ModelSettings | None,
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         agent_info = AgentInfo(
             function_tools=model_request_parameters.function_tools,
             allow_text_output=model_request_parameters.allow_text_output,
@@ -154,6 +158,10 @@ class FunctionModel(Model):
         model_request_parameters: ModelRequestParameters,
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         agent_info = AgentInfo(
             function_tools=model_request_parameters.function_tools,
             allow_text_output=model_request_parameters.allow_text_output,
pydantic_ai/models/gemini.py CHANGED
@@ -155,6 +155,10 @@ class GeminiModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         async with self._make_request(
             messages, False, cast(GeminiModelSettings, model_settings or {}), model_request_parameters
         ) as http_response:
@@ -171,6 +175,10 @@ class GeminiModel(Model):
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         async with self._make_request(
             messages, True, cast(GeminiModelSettings, model_settings or {}), model_request_parameters
         ) as http_response:
pydantic_ai/models/google.py CHANGED
@@ -225,6 +225,10 @@ class GoogleModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         model_settings = cast(GoogleModelSettings, model_settings or {})
         response = await self._generate_content(messages, False, model_settings, model_request_parameters)
         return self._process_response(response)
@@ -236,6 +240,10 @@ class GoogleModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> usage.RequestUsage:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         model_settings = cast(GoogleModelSettings, model_settings or {})
         contents, generation_config = await self._build_content_and_config(
             messages, model_settings, model_request_parameters
@@ -291,6 +299,10 @@ class GoogleModel(Model):
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         model_settings = cast(GoogleModelSettings, model_settings or {})
         response = await self._generate_content(messages, True, model_settings, model_request_parameters)
         yield await self._process_streamed_response(response, model_request_parameters)  # type: ignore
@@ -419,8 +431,8 @@ class GoogleModel(Model):
         return contents, config
 
     def _process_response(self, response: GenerateContentResponse) -> ModelResponse:
-        if not response.candidates or len(response.candidates) != 1:
-            raise UnexpectedModelBehavior('Expected exactly one candidate in Gemini response')  # pragma: no cover
+        if not response.candidates:
+            raise UnexpectedModelBehavior('Expected at least one candidate in Gemini response')  # pragma: no cover
         candidate = response.candidates[0]
         if candidate.content is None or candidate.content.parts is None:
             if candidate.finish_reason == 'SAFETY':
pydantic_ai/models/groq.py CHANGED
@@ -182,6 +182,10 @@ class GroqModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         try:
             response = await self._completions_create(
                 messages, False, cast(GroqModelSettings, model_settings or {}), model_request_parameters
@@ -218,6 +222,10 @@ class GroqModel(Model):
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._completions_create(
             messages, True, cast(GroqModelSettings, model_settings or {}), model_request_parameters
         )
pydantic_ai/models/huggingface.py CHANGED
@@ -166,6 +166,10 @@ class HuggingFaceModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._completions_create(
             messages, False, cast(HuggingFaceModelSettings, model_settings or {}), model_request_parameters
         )
@@ -181,6 +185,10 @@ class HuggingFaceModel(Model):
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._completions_create(
             messages, True, cast(HuggingFaceModelSettings, model_settings or {}), model_request_parameters
         )
@@ -377,8 +385,6 @@ class HuggingFaceModel(Model):
                 },
             }
         )
-        if f.strict is not None:
-            tool_param['function']['strict'] = f.strict
         return tool_param
 
     async def _map_user_message(
pydantic_ai/models/instrumented.py CHANGED
@@ -21,6 +21,8 @@ from opentelemetry.trace import Span, Tracer, TracerProvider, get_tracer_provider
 from opentelemetry.util.types import AttributeValue
 from pydantic import TypeAdapter
 
+from pydantic_ai._instrumentation import DEFAULT_INSTRUMENTATION_VERSION
+
 from .. import _otel_messages
 from .._run_context import RunContext
 from ..messages import (
@@ -90,7 +92,7 @@ class InstrumentationSettings:
     event_mode: Literal['attributes', 'logs'] = 'attributes'
     include_binary_content: bool = True
     include_content: bool = True
-    version: Literal[1, 2] = 1
+    version: Literal[1, 2, 3] = DEFAULT_INSTRUMENTATION_VERSION
 
     def __init__(
         self,
@@ -99,7 +101,7 @@ class InstrumentationSettings:
         meter_provider: MeterProvider | None = None,
         include_binary_content: bool = True,
         include_content: bool = True,
-        version: Literal[1, 2] = 2,
+        version: Literal[1, 2, 3] = DEFAULT_INSTRUMENTATION_VERSION,
         event_mode: Literal['attributes', 'logs'] = 'attributes',
         event_logger_provider: EventLoggerProvider | None = None,
     ):
@@ -352,8 +354,12 @@ class InstrumentedModel(WrapperModel):
         model_settings: ModelSettings | None,
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
-        with self._instrument(messages, model_settings, model_request_parameters) as finish:
-            response = await super().request(messages, model_settings, model_request_parameters)
+        prepared_settings, prepared_parameters = self.wrapped.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
+        with self._instrument(messages, prepared_settings, prepared_parameters) as finish:
+            response = await self.wrapped.request(messages, model_settings, model_request_parameters)
             finish(response)
         return response
 
@@ -365,10 +371,14 @@ class InstrumentedModel(WrapperModel):
         model_request_parameters: ModelRequestParameters,
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
-        with self._instrument(messages, model_settings, model_request_parameters) as finish:
+        prepared_settings, prepared_parameters = self.wrapped.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
+        with self._instrument(messages, prepared_settings, prepared_parameters) as finish:
             response_stream: StreamedResponse | None = None
             try:
-                async with super().request_stream(
+                async with self.wrapped.request_stream(
                     messages, model_settings, model_request_parameters, run_context
                 ) as response_stream:
                     yield response_stream
pydantic_ai/models/mcp_sampling.py CHANGED
@@ -52,6 +52,8 @@ class MCPSamplingModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
         system_prompt, sampling_messages = _mcp.map_from_pai_messages(messages)
+
+        model_settings, _ = self.prepare_request(model_settings, model_request_parameters)
         model_settings = cast(MCPSamplingModelSettings, model_settings or {})
 
         result = await self.session.create_message(
pydantic_ai/models/mistral.py CHANGED
@@ -185,6 +185,10 @@ class MistralModel(Model):
     ) -> ModelResponse:
         """Make a non-streaming request to the model from Pydantic AI call."""
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._completions_create(
             messages, cast(MistralModelSettings, model_settings or {}), model_request_parameters
         )
@@ -201,6 +205,10 @@ class MistralModel(Model):
     ) -> AsyncIterator[StreamedResponse]:
         """Make a streaming request to the model from Pydantic AI call."""
         check_allow_model_requests()
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         response = await self._stream_completions_create(
             messages, cast(MistralModelSettings, model_settings or {}), model_request_parameters
         )