pydantic-ai-slim 0.8.0__py3-none-any.whl → 1.0.0b1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries, and is provided for informational purposes only.

This version of pydantic-ai-slim has been flagged as potentially problematic.

Files changed (70)
  1. pydantic_ai/__init__.py +28 -2
  2. pydantic_ai/_agent_graph.py +310 -140
  3. pydantic_ai/_function_schema.py +5 -5
  4. pydantic_ai/_griffe.py +2 -1
  5. pydantic_ai/_otel_messages.py +2 -2
  6. pydantic_ai/_output.py +31 -35
  7. pydantic_ai/_parts_manager.py +4 -4
  8. pydantic_ai/_run_context.py +3 -1
  9. pydantic_ai/_system_prompt.py +2 -2
  10. pydantic_ai/_tool_manager.py +3 -22
  11. pydantic_ai/_utils.py +14 -26
  12. pydantic_ai/ag_ui.py +7 -8
  13. pydantic_ai/agent/__init__.py +84 -17
  14. pydantic_ai/agent/abstract.py +35 -4
  15. pydantic_ai/agent/wrapper.py +6 -0
  16. pydantic_ai/builtin_tools.py +2 -2
  17. pydantic_ai/common_tools/duckduckgo.py +4 -2
  18. pydantic_ai/durable_exec/temporal/__init__.py +70 -17
  19. pydantic_ai/durable_exec/temporal/_agent.py +23 -2
  20. pydantic_ai/durable_exec/temporal/_function_toolset.py +53 -6
  21. pydantic_ai/durable_exec/temporal/_logfire.py +6 -3
  22. pydantic_ai/durable_exec/temporal/_mcp_server.py +2 -1
  23. pydantic_ai/durable_exec/temporal/_model.py +2 -2
  24. pydantic_ai/durable_exec/temporal/_run_context.py +2 -1
  25. pydantic_ai/durable_exec/temporal/_toolset.py +2 -1
  26. pydantic_ai/exceptions.py +45 -2
  27. pydantic_ai/format_prompt.py +2 -2
  28. pydantic_ai/mcp.py +2 -2
  29. pydantic_ai/messages.py +81 -28
  30. pydantic_ai/models/__init__.py +19 -7
  31. pydantic_ai/models/anthropic.py +6 -6
  32. pydantic_ai/models/bedrock.py +63 -57
  33. pydantic_ai/models/cohere.py +3 -3
  34. pydantic_ai/models/fallback.py +2 -2
  35. pydantic_ai/models/function.py +25 -23
  36. pydantic_ai/models/gemini.py +10 -13
  37. pydantic_ai/models/google.py +4 -4
  38. pydantic_ai/models/groq.py +5 -5
  39. pydantic_ai/models/huggingface.py +5 -5
  40. pydantic_ai/models/instrumented.py +44 -21
  41. pydantic_ai/models/mcp_sampling.py +3 -1
  42. pydantic_ai/models/mistral.py +8 -8
  43. pydantic_ai/models/openai.py +20 -29
  44. pydantic_ai/models/test.py +24 -4
  45. pydantic_ai/output.py +27 -32
  46. pydantic_ai/profiles/__init__.py +3 -3
  47. pydantic_ai/profiles/groq.py +1 -1
  48. pydantic_ai/profiles/openai.py +25 -4
  49. pydantic_ai/providers/anthropic.py +2 -3
  50. pydantic_ai/providers/bedrock.py +3 -2
  51. pydantic_ai/result.py +173 -52
  52. pydantic_ai/retries.py +10 -29
  53. pydantic_ai/run.py +12 -5
  54. pydantic_ai/tools.py +126 -22
  55. pydantic_ai/toolsets/__init__.py +4 -1
  56. pydantic_ai/toolsets/_dynamic.py +4 -4
  57. pydantic_ai/toolsets/abstract.py +18 -2
  58. pydantic_ai/toolsets/approval_required.py +32 -0
  59. pydantic_ai/toolsets/combined.py +7 -12
  60. pydantic_ai/toolsets/{deferred.py → external.py} +11 -5
  61. pydantic_ai/toolsets/filtered.py +1 -1
  62. pydantic_ai/toolsets/function.py +13 -4
  63. pydantic_ai/toolsets/wrapper.py +2 -1
  64. pydantic_ai/usage.py +7 -5
  65. {pydantic_ai_slim-0.8.0.dist-info → pydantic_ai_slim-1.0.0b1.dist-info}/METADATA +6 -7
  66. pydantic_ai_slim-1.0.0b1.dist-info/RECORD +120 -0
  67. pydantic_ai_slim-0.8.0.dist-info/RECORD +0 -119
  68. {pydantic_ai_slim-0.8.0.dist-info → pydantic_ai_slim-1.0.0b1.dist-info}/WHEEL +0 -0
  69. {pydantic_ai_slim-0.8.0.dist-info → pydantic_ai_slim-1.0.0b1.dist-info}/entry_points.txt +0 -0
  70. {pydantic_ai_slim-0.8.0.dist-info → pydantic_ai_slim-1.0.0b1.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/models/mistral.py CHANGED
@@ -5,7 +5,7 @@ from collections.abc import AsyncIterable, AsyncIterator, Iterable
 from contextlib import asynccontextmanager
 from dataclasses import dataclass, field
 from datetime import datetime
-from typing import Any, Literal, Union, cast
+from typing import Any, Literal, cast

 import pydantic_core
 from httpx import Timeout
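A change that repeats throughout this diff is the move from `typing.Union` to PEP 604 `X | Y` unions, both in annotations and in type aliases. Since module-level aliases like `MistralToolCallId = str | None` are evaluated at import time, this spelling needs Python 3.10+ at runtime; the switch to `from typing import TypeAlias` in `providers/anthropic.py` below points to the same raised floor. A minimal before/after sketch:

```python
from typing import Union

# 0.8.0 spelling, valid on older Pythons:
MistralToolCallId = Union[str, None]

# 1.0.0b1 spelling (PEP 604); evaluated at import time, so it needs Python 3.10+:
MistralToolCallId = str | None
```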
@@ -79,7 +79,7 @@ try:
     from mistralai.models.usermessage import UserMessage as MistralUserMessage
     from mistralai.types.basemodel import Unset as MistralUnset
     from mistralai.utils.eventstreaming import EventStreamAsync as MistralEventStreamAsync
-except ImportError as e:  # pragma: no cover
+except ImportError as e:
     raise ImportError(
         'Please install `mistral` to use the Mistral model, '
         'you can use the `mistral` optional group — `pip install "pydantic-ai-slim[mistral]"`'
@@ -90,7 +90,7 @@ LatestMistralModelNames = Literal[
 ]
 """Latest Mistral models."""

-MistralModelName = Union[str, LatestMistralModelNames]
+MistralModelName = str | LatestMistralModelNames
 """Possible Mistral model names.

 Since Mistral supports a variety of date-stamped models, we explicitly list the most popular models but
@@ -117,7 +117,7 @@ class MistralModel(Model):
     """

     client: Mistral = field(repr=False)
-    json_mode_schema_prompt: str = """Answer in JSON Object, respect the format:\n```\n{schema}\n```\n"""
+    json_mode_schema_prompt: str

     _model_name: MistralModelName = field(repr=False)
     _provider: Provider[Mistral] = field(repr=False)
@@ -348,11 +348,11 @@ class MistralModel(Model):
             parts.append(tool)

         return ModelResponse(
-            parts,
+            parts=parts,
             usage=_map_usage(response),
             model_name=response.model,
             timestamp=timestamp,
-            provider_request_id=response.id,
+            provider_response_id=response.id,
             provider_name=self._provider.name,
         )

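The two changes in this hunk recur across all the model classes in this diff: `ModelResponse` parts are now passed by keyword, and `provider_request_id` is renamed to `provider_response_id`. A minimal migration sketch for code that constructs or inspects responses directly, assuming no other required fields change:

```python
from pydantic_ai.messages import ModelResponse, TextPart

# 0.8.0: ModelResponse([TextPart('hi')], provider_request_id='resp_123')
# 1.0.0b1: `parts` by keyword, and the field is renamed:
response = ModelResponse(parts=[TextPart('hi')], provider_response_id='resp_123')
print(response.provider_response_id)
#> resp_123
```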
@@ -515,7 +515,7 @@ class MistralModel(Model):
                 pass
             elif isinstance(part, ToolCallPart):
                 tool_calls.append(self._map_tool_call(part))
-            elif isinstance(part, (BuiltinToolCallPart, BuiltinToolReturnPart)):  # pragma: no cover
+            elif isinstance(part, BuiltinToolCallPart | BuiltinToolReturnPart):  # pragma: no cover
                 # This is currently never returned from mistral
                 pass
             else:
@@ -576,7 +576,7 @@ class MistralModel(Model):
         return MistralUserMessage(content=content)


-MistralToolCallId = Union[str, None]
+MistralToolCallId = str | None


 @dataclass
pydantic_ai/models/openai.py CHANGED
@@ -6,7 +6,7 @@ from collections.abc import AsyncIterable, AsyncIterator, Sequence
 from contextlib import asynccontextmanager
 from dataclasses import dataclass, field
 from datetime import datetime
-from typing import Any, Literal, Union, cast, overload
+from typing import Any, Literal, cast, overload

 from pydantic import ValidationError
 from typing_extensions import assert_never, deprecated
@@ -90,7 +90,7 @@ __all__ = (
     'OpenAIModelName',
 )

-OpenAIModelName = Union[str, AllModels]
+OpenAIModelName = str | AllModels
 """
 Possible OpenAI model names.

@@ -409,13 +409,6 @@ class OpenAIChatModel(Model):
         for setting in unsupported_model_settings:
             model_settings.pop(setting, None)

-        # TODO(Marcelo): Deprecate this in favor of `openai_unsupported_model_settings`.
-        sampling_settings = (
-            model_settings
-            if OpenAIModelProfile.from_profile(self.profile).openai_supports_sampling_settings
-            else OpenAIChatModelSettings()
-        )
-
         try:
             extra_headers = model_settings.get('extra_headers', {})
             extra_headers.setdefault('User-Agent', get_user_agent())
@@ -437,13 +430,13 @@ class OpenAIChatModel(Model):
                 web_search_options=web_search_options or NOT_GIVEN,
                 service_tier=model_settings.get('openai_service_tier', NOT_GIVEN),
                 prediction=model_settings.get('openai_prediction', NOT_GIVEN),
-                temperature=sampling_settings.get('temperature', NOT_GIVEN),
-                top_p=sampling_settings.get('top_p', NOT_GIVEN),
-                presence_penalty=sampling_settings.get('presence_penalty', NOT_GIVEN),
-                frequency_penalty=sampling_settings.get('frequency_penalty', NOT_GIVEN),
-                logit_bias=sampling_settings.get('logit_bias', NOT_GIVEN),
-                logprobs=sampling_settings.get('openai_logprobs', NOT_GIVEN),
-                top_logprobs=sampling_settings.get('openai_top_logprobs', NOT_GIVEN),
+                temperature=model_settings.get('temperature', NOT_GIVEN),
+                top_p=model_settings.get('top_p', NOT_GIVEN),
+                presence_penalty=model_settings.get('presence_penalty', NOT_GIVEN),
+                frequency_penalty=model_settings.get('frequency_penalty', NOT_GIVEN),
+                logit_bias=model_settings.get('logit_bias', NOT_GIVEN),
+                logprobs=model_settings.get('openai_logprobs', NOT_GIVEN),
+                top_logprobs=model_settings.get('openai_top_logprobs', NOT_GIVEN),
                 extra_headers=extra_headers,
                 extra_body=model_settings.get('extra_body'),
             )
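With the `sampling_settings` indirection removed, sampling parameters are read straight from `model_settings`; whether they reach the request now depends on the profile's `openai_unsupported_model_settings` tuple (see the `profiles/openai.py` hunks below), which pops unsupported keys before the request is built. Caller-side usage is unchanged; a sketch, assuming `OpenAIChatModelSettings` is exported from `pydantic_ai.models.openai` as the removed code suggests:

```python
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIChatModelSettings

# `temperature` is forwarded for gpt-4o, but would be stripped for a
# reasoning model whose profile lists it as unsupported.
agent = Agent('openai:gpt-4o', model_settings=OpenAIChatModelSettings(temperature=0.2))
```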
@@ -512,12 +505,12 @@ class OpenAIChatModel(Model):
                 part.tool_call_id = _guard_tool_call_id(part)
             items.append(part)
         return ModelResponse(
-            items,
+            parts=items,
             usage=_map_usage(response),
             model_name=response.model,
             timestamp=timestamp,
             provider_details=vendor_details,
-            provider_request_id=response.id,
+            provider_response_id=response.id,
             provider_name=self._provider.name,
         )

@@ -582,7 +575,7 @@ class OpenAIChatModel(Model):
             elif isinstance(item, ToolCallPart):
                 tool_calls.append(self._map_tool_call(item))
             # OpenAI doesn't return built-in tool calls
-            elif isinstance(item, (BuiltinToolCallPart, BuiltinToolReturnPart)):  # pragma: no cover
+            elif isinstance(item, BuiltinToolCallPart | BuiltinToolReturnPart):  # pragma: no cover
                 pass
             else:
                 assert_never(item)
@@ -828,10 +821,10 @@ class OpenAIResponsesModel(Model):
             elif item.type == 'function_call':
                 items.append(ToolCallPart(item.name, item.arguments, tool_call_id=item.call_id))
         return ModelResponse(
-            items,
+            parts=items,
             usage=_map_usage(response),
             model_name=response.model,
-            provider_request_id=response.id,
+            provider_response_id=response.id,
             timestamp=timestamp,
             provider_name=self._provider.name,
         )
@@ -918,11 +911,9 @@ class OpenAIResponsesModel(Model):
             text = text or {}
             text['verbosity'] = verbosity

-        sampling_settings = (
-            model_settings
-            if OpenAIModelProfile.from_profile(self.profile).openai_supports_sampling_settings
-            else OpenAIResponsesModelSettings()
-        )
+        unsupported_model_settings = OpenAIModelProfile.from_profile(self.profile).openai_unsupported_model_settings
+        for setting in unsupported_model_settings:
+            model_settings.pop(setting, None)

         try:
             extra_headers = model_settings.get('extra_headers', {})
@@ -936,8 +927,8 @@ class OpenAIResponsesModel(Model):
                 tool_choice=tool_choice or NOT_GIVEN,
                 max_output_tokens=model_settings.get('max_tokens', NOT_GIVEN),
                 stream=stream,
-                temperature=sampling_settings.get('temperature', NOT_GIVEN),
-                top_p=sampling_settings.get('top_p', NOT_GIVEN),
+                temperature=model_settings.get('temperature', NOT_GIVEN),
+                top_p=model_settings.get('top_p', NOT_GIVEN),
                 truncation=model_settings.get('openai_truncation', NOT_GIVEN),
                 timeout=model_settings.get('timeout', NOT_GIVEN),
                 service_tier=model_settings.get('openai_service_tier', NOT_GIVEN),
@@ -1049,7 +1040,7 @@ class OpenAIResponsesModel(Model):
             elif isinstance(item, ToolCallPart):
                 openai_messages.append(self._map_tool_call(item))
             # OpenAI doesn't return built-in tool calls
-            elif isinstance(item, (BuiltinToolCallPart, BuiltinToolReturnPart)):
+            elif isinstance(item, BuiltinToolCallPart | BuiltinToolReturnPart):
                 pass
             elif isinstance(item, ThinkingPart):
                 # NOTE: We don't send ThinkingPart to the providers yet. If you are unsatisfied with this,
pydantic_ai/models/test.py CHANGED
@@ -195,7 +195,10 @@ class TestModel(Model):
         # if there are tools, the first thing we want to do is call all of them
         if tool_calls and not any(isinstance(m, ModelResponse) for m in messages):
             return ModelResponse(
-                parts=[ToolCallPart(name, self.gen_tool_args(args)) for name, args in tool_calls],
+                parts=[
+                    ToolCallPart(name, self.gen_tool_args(args), tool_call_id=f'pyd_ai_tool_call_id__{name}')
+                    for name, args in tool_calls
+                ],
                 model_name=self._model_name,
             )

@@ -220,6 +223,7 @@ class TestModel(Model):
                     output_wrapper.value
                     if isinstance(output_wrapper, _WrappedToolOutput) and output_wrapper.value is not None
                     else self.gen_tool_args(tool),
+                    tool_call_id=f'pyd_ai_tool_call_id__{tool.name}',
                 )
                 for tool in output_tools
                 if tool.name in new_retry_names
@@ -250,11 +254,27 @@ class TestModel(Model):
             output_tool = output_tools[self.seed % len(output_tools)]
             if custom_output_args is not None:
                 return ModelResponse(
-                    parts=[ToolCallPart(output_tool.name, custom_output_args)], model_name=self._model_name
+                    parts=[
+                        ToolCallPart(
+                            output_tool.name,
+                            custom_output_args,
+                            tool_call_id=f'pyd_ai_tool_call_id__{output_tool.name}',
+                        )
+                    ],
+                    model_name=self._model_name,
                 )
             else:
                 response_args = self.gen_tool_args(output_tool)
-                return ModelResponse(parts=[ToolCallPart(output_tool.name, response_args)], model_name=self._model_name)
+                return ModelResponse(
+                    parts=[
+                        ToolCallPart(
+                            output_tool.name,
+                            response_args,
+                            tool_call_id=f'pyd_ai_tool_call_id__{output_tool.name}',
+                        )
+                    ],
+                    model_name=self._model_name,
+                )


 @dataclass
@@ -293,7 +313,7 @@ class TestStreamedResponse(StreamedResponse):
                 yield self._parts_manager.handle_tool_call_part(
                     vendor_part_id=i, tool_name=part.tool_name, args=part.args, tool_call_id=part.tool_call_id
                 )
-            elif isinstance(part, (BuiltinToolCallPart, BuiltinToolReturnPart)):  # pragma: no cover
+            elif isinstance(part, BuiltinToolCallPart | BuiltinToolReturnPart):  # pragma: no cover
                 # NOTE: These parts are not generated by TestModel, but we need to handle them for type checking
                 assert False, f'Unexpected part type in TestModel: {type(part).__name__}'
             elif isinstance(part, ThinkingPart):  # pragma: no cover
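The practical effect of these `test.py` hunks is that `TestModel` now stamps every tool call it generates with a deterministic `pyd_ai_tool_call_id__<tool name>` id instead of relying on auto-generated ids, which should keep message-history snapshots stable across test runs. A sketch with a hypothetical tool:

```python
from pydantic_ai import Agent
from pydantic_ai.models.test import TestModel

agent = Agent(TestModel())

@agent.tool_plain
def get_time() -> str:
    return 'noon'

result = agent.run_sync('What time is it?')
# The ToolCallPart in result.all_messages() now carries
# tool_call_id='pyd_ai_tool_call_id__get_time' rather than a random id.
```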
pydantic_ai/output.py CHANGED
@@ -1,17 +1,17 @@
 from __future__ import annotations

-from collections.abc import Awaitable, Sequence
+from collections.abc import Awaitable, Callable, Sequence
 from dataclasses import dataclass
-from typing import Any, Callable, Generic, Literal, Union
+from typing import Any, Generic, Literal

 from pydantic import GetCoreSchemaHandler, GetJsonSchemaHandler
 from pydantic.json_schema import JsonSchemaValue
 from pydantic_core import core_schema
-from typing_extensions import TypeAliasType, TypeVar
+from typing_extensions import TypeAliasType, TypeVar, deprecated

 from . import _utils
 from .messages import ToolCallPart
-from .tools import RunContext, ToolDefinition
+from .tools import DeferredToolRequests, RunContext, ToolDefinition

 __all__ = (
     # classes
@@ -42,7 +42,7 @@ StructuredOutputMode = Literal['tool', 'native', 'prompted']


 OutputTypeOrFunction = TypeAliasType(
-    'OutputTypeOrFunction', Union[type[T_co], Callable[..., Union[Awaitable[T_co], T_co]]], type_params=(T_co,)
+    'OutputTypeOrFunction', type[T_co] | Callable[..., Awaitable[T_co] | T_co], type_params=(T_co,)
 )
 """Definition of an output type or function.

@@ -54,10 +54,7 @@ See [output docs](../output.md) for more information.

 TextOutputFunc = TypeAliasType(
     'TextOutputFunc',
-    Union[
-        Callable[[RunContext, str], Union[Awaitable[T_co], T_co]],
-        Callable[[str], Union[Awaitable[T_co], T_co]],
-    ],
+    Callable[[RunContext, str], Awaitable[T_co] | T_co] | Callable[[str], Awaitable[T_co] | T_co],
     type_params=(T_co,),
 )
 """Definition of a function that will be called to process the model's plain text output. The function must take a single string argument.
@@ -135,10 +132,9 @@ class NativeOutput(Generic[OutputDataT]):

     Example:
     ```python {title="native_output.py" requires="tool_output.py"}
-    from tool_output import Fruit, Vehicle
-
     from pydantic_ai import Agent, NativeOutput

+    from tool_output import Fruit, Vehicle

     agent = Agent(
         'openai:gpt-4o',
@@ -184,10 +180,11 @@ class PromptedOutput(Generic[OutputDataT]):
     Example:
     ```python {title="prompted_output.py" requires="tool_output.py"}
     from pydantic import BaseModel
-    from tool_output import Vehicle

     from pydantic_ai import Agent, PromptedOutput

+    from tool_output import Vehicle
+

     class Device(BaseModel):
         name: str
@@ -286,18 +283,17 @@ def StructuredDict(
     ```python {title="structured_dict.py"}
     from pydantic_ai import Agent, StructuredDict

-
     schema = {
-        "type": "object",
-        "properties": {
-            "name": {"type": "string"},
-            "age": {"type": "integer"}
+        'type': 'object',
+        'properties': {
+            'name': {'type': 'string'},
+            'age': {'type': 'integer'}
         },
-        "required": ["name", "age"]
+        'required': ['name', 'age']
     }

     agent = Agent('openai:gpt-4o', output_type=StructuredDict(schema))
-    result = agent.run_sync("Create a person")
+    result = agent.run_sync('Create a person')
     print(result.output)
     #> {'name': 'John Doe', 'age': 30}
     ```
@@ -333,16 +329,13 @@ def StructuredDict(

 _OutputSpecItem = TypeAliasType(
     '_OutputSpecItem',
-    Union[OutputTypeOrFunction[T_co], ToolOutput[T_co], NativeOutput[T_co], PromptedOutput[T_co], TextOutput[T_co]],
+    OutputTypeOrFunction[T_co] | ToolOutput[T_co] | NativeOutput[T_co] | PromptedOutput[T_co] | TextOutput[T_co],
     type_params=(T_co,),
 )

 OutputSpec = TypeAliasType(
     'OutputSpec',
-    Union[
-        _OutputSpecItem[T_co],
-        Sequence['OutputSpec[T_co]'],
-    ],
+    _OutputSpecItem[T_co] | Sequence['OutputSpec[T_co]'],
     type_params=(T_co,),
 )
 """Specification of the agent's output data.
@@ -359,12 +352,14 @@ See [output docs](../output.md) for more information.
 """


-@dataclass
-class DeferredToolCalls:
-    """Container for calls of deferred tools. This can be used as an agent's `output_type` and will be used as the output of the agent run if the model called any deferred tools.
-
-    See [deferred toolset docs](../toolsets.md#deferred-toolset) for more information.
-    """
+@deprecated('`DeferredToolCalls` is deprecated, use `DeferredToolRequests` instead')
+class DeferredToolCalls(DeferredToolRequests):  # pragma: no cover
+    @property
+    @deprecated('`DeferredToolCalls.tool_calls` is deprecated, use `DeferredToolRequests.calls` instead')
+    def tool_calls(self) -> list[ToolCallPart]:
+        return self.calls

-    tool_calls: list[ToolCallPart]
-    tool_defs: dict[str, ToolDefinition]
+    @property
+    @deprecated('`DeferredToolCalls.tool_defs` is deprecated')
+    def tool_defs(self) -> dict[str, ToolDefinition]:
+        return {}
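`DeferredToolCalls` survives only as a deprecated shim over the new `DeferredToolRequests`: `tool_calls` forwards to `.calls`, and `tool_defs` now returns an empty dict. A hedged migration sketch, assuming `DeferredToolRequests` is re-exported from the package root (the expanded `pydantic_ai/__init__.py` suggests so; otherwise import it from `pydantic_ai.tools`):

```python
from pydantic_ai import DeferredToolRequests

def handle_deferred(output: DeferredToolRequests) -> None:
    # 0.8.0: output.tool_calls (plus output.tool_defs for the definitions)
    # 1.0.0b1: the tool-call parts live on `.calls`
    for call in output.calls:
        print(call.tool_name, call.tool_call_id)
```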
pydantic_ai/profiles/__init__.py CHANGED
@@ -1,8 +1,8 @@
 from __future__ import annotations as _annotations

+from collections.abc import Callable
 from dataclasses import dataclass, fields, replace
 from textwrap import dedent
-from typing import Callable, Union

 from typing_extensions import Self

@@ -18,7 +18,7 @@ __all__ = [
 ]


-@dataclass
+@dataclass(kw_only=True)
 class ModelProfile:
     """Describes how requests to and responses from specific models or families of models need to be constructed and processed to get the best results, independent of the model and provider classes used."""

@@ -75,6 +75,6 @@ class ModelProfile:
         return replace(self, **non_default_attrs)


-ModelProfileSpec = Union[ModelProfile, Callable[[str], Union[ModelProfile, None]]]
+ModelProfileSpec = ModelProfile | Callable[[str], ModelProfile | None]

 DEFAULT_PROFILE = ModelProfile()
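`ModelProfile` and its subclasses (Groq, OpenAI, and Bedrock below) are now keyword-only dataclasses, so positional construction raises `TypeError`. A sketch using field names that appear elsewhere in this diff:

```python
from pydantic_ai.profiles import ModelProfile

# ModelProfile(True)  # 1.0.0b1: TypeError, fields are keyword-only
profile = ModelProfile(
    supports_json_schema_output=True,
    supports_json_object_output=True,
)
```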
pydantic_ai/profiles/groq.py CHANGED
@@ -5,7 +5,7 @@ from dataclasses import dataclass
 from . import ModelProfile


-@dataclass
+@dataclass(kw_only=True)
 class GroqModelProfile(ModelProfile):
     """Profile for models used with GroqModel.

pydantic_ai/profiles/openai.py CHANGED
@@ -1,6 +1,7 @@
 from __future__ import annotations as _annotations

 import re
+import warnings
 from collections.abc import Sequence
 from dataclasses import dataclass
 from typing import Any, Literal
@@ -11,7 +12,7 @@ from ._json_schema import JsonSchema, JsonSchemaTransformer
 OpenAISystemPromptRole = Literal['system', 'developer', 'user']


-@dataclass
+@dataclass(kw_only=True)
 class OpenAIModelProfile(ModelProfile):
     """Profile for models used with `OpenAIChatModel`.

@@ -21,7 +22,6 @@ class OpenAIModelProfile(ModelProfile):
     openai_supports_strict_tool_definition: bool = True
     """This can be set by a provider or user if the OpenAI-"compatible" API doesn't support strict tool definitions."""

-    # TODO(Marcelo): Deprecate this in favor of `openai_unsupported_model_settings`.
     openai_supports_sampling_settings: bool = True
     """Turn off to don't send sampling settings like `temperature` and `top_p` to models that don't support them, like OpenAI's o-series reasoning models."""

@@ -38,6 +38,14 @@ class OpenAIModelProfile(ModelProfile):
     openai_system_prompt_role: OpenAISystemPromptRole | None = None
     """The role to use for the system prompt message. If not provided, defaults to `'system'`."""

+    def __post_init__(self):  # pragma: no cover
+        if not self.openai_supports_sampling_settings:
+            warnings.warn(
+                'The `openai_supports_sampling_settings` has no effect, and it will be removed in future versions. '
+                'Use `openai_unsupported_model_settings` instead.',
+                DeprecationWarning,
+            )
+

 def openai_model_profile(model_name: str) -> ModelProfile:
     """Get the model profile for an OpenAI model."""
@@ -46,6 +54,19 @@ def openai_model_profile(model_name: str) -> ModelProfile:
     # We leave it in here for all models because the `default_structured_output_mode` is `'tool'`, so `native` is only used
     # when the user specifically uses the `NativeOutput` marker, so an error from the API is acceptable.

+    if is_reasoning_model:
+        openai_unsupported_model_settings = (
+            'temperature',
+            'top_p',
+            'presence_penalty',
+            'frequency_penalty',
+            'logit_bias',
+            'logprobs',
+            'top_logprobs',
+        )
+    else:
+        openai_unsupported_model_settings = ()
+
     # The o1-mini model doesn't support the `system` role, so we default to `user`.
     # See https://github.com/pydantic/pydantic-ai/issues/974 for more details.
     openai_system_prompt_role = 'user' if model_name.startswith('o1-mini') else None
@@ -54,7 +75,7 @@ def openai_model_profile(model_name: str) -> ModelProfile:
         json_schema_transformer=OpenAIJsonSchemaTransformer,
         supports_json_schema_output=True,
         supports_json_object_output=True,
-        openai_supports_sampling_settings=not is_reasoning_model,
+        openai_unsupported_model_settings=openai_unsupported_model_settings,
         openai_system_prompt_role=openai_system_prompt_role,
     )

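The boolean `openai_supports_sampling_settings` flag gives way to an explicit tuple of setting names that are popped from `model_settings` before a request is built, so custom OpenAI-compatible profiles can strip exactly the settings their backend rejects. A sketch, with field names as they appear in this diff:

```python
from pydantic_ai.profiles.openai import OpenAIModelProfile

# A profile for a hypothetical OpenAI-compatible backend that rejects penalty settings:
profile = OpenAIModelProfile(
    openai_unsupported_model_settings=('presence_penalty', 'frequency_penalty'),
)
```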
@@ -89,7 +110,7 @@ _STRICT_COMPATIBLE_STRING_FORMATS = [
 _sentinel = object()


-@dataclass
+@dataclass(init=False)
 class OpenAIJsonSchemaTransformer(JsonSchemaTransformer):
     """Recursively handle the schema to make it compatible with OpenAI strict mode.

pydantic_ai/providers/anthropic.py CHANGED
@@ -1,10 +1,9 @@
 from __future__ import annotations as _annotations

 import os
-from typing import Union, overload
+from typing import TypeAlias, overload

 import httpx
-from typing_extensions import TypeAlias

 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
@@ -21,7 +20,7 @@ except ImportError as _import_error:
     ) from _import_error


-AsyncAnthropicClient: TypeAlias = Union[AsyncAnthropic, AsyncAnthropicBedrock]
+AsyncAnthropicClient: TypeAlias = AsyncAnthropic | AsyncAnthropicBedrock


 class AnthropicProvider(Provider[AsyncAnthropicClient]):
pydantic_ai/providers/bedrock.py CHANGED
@@ -2,8 +2,9 @@ from __future__ import annotations as _annotations

 import os
 import re
+from collections.abc import Callable
 from dataclasses import dataclass
-from typing import Callable, Literal, overload
+from typing import Literal, overload

 from pydantic_ai.exceptions import UserError
 from pydantic_ai.profiles import ModelProfile
@@ -27,7 +28,7 @@ except ImportError as _import_error:
     ) from _import_error


-@dataclass
+@dataclass(kw_only=True)
 class BedrockModelProfile(ModelProfile):
     """Profile for models used with BedrockModel.