pydantic-ai-slim 0.6.2__py3-none-any.whl → 0.7.1__py3-none-any.whl

This diff shows the changes between package versions as published to their public registries, and is provided for informational purposes only.

Files changed (58)
  1. pydantic_ai/_a2a.py +6 -4
  2. pydantic_ai/_agent_graph.py +37 -37
  3. pydantic_ai/_cli.py +3 -3
  4. pydantic_ai/_output.py +8 -0
  5. pydantic_ai/_tool_manager.py +3 -0
  6. pydantic_ai/ag_ui.py +25 -14
  7. pydantic_ai/{agent.py → agent/__init__.py} +209 -1027
  8. pydantic_ai/agent/abstract.py +942 -0
  9. pydantic_ai/agent/wrapper.py +227 -0
  10. pydantic_ai/direct.py +9 -9
  11. pydantic_ai/durable_exec/__init__.py +0 -0
  12. pydantic_ai/durable_exec/temporal/__init__.py +83 -0
  13. pydantic_ai/durable_exec/temporal/_agent.py +699 -0
  14. pydantic_ai/durable_exec/temporal/_function_toolset.py +92 -0
  15. pydantic_ai/durable_exec/temporal/_logfire.py +48 -0
  16. pydantic_ai/durable_exec/temporal/_mcp_server.py +145 -0
  17. pydantic_ai/durable_exec/temporal/_model.py +168 -0
  18. pydantic_ai/durable_exec/temporal/_run_context.py +50 -0
  19. pydantic_ai/durable_exec/temporal/_toolset.py +77 -0
  20. pydantic_ai/ext/aci.py +10 -9
  21. pydantic_ai/ext/langchain.py +4 -2
  22. pydantic_ai/mcp.py +203 -75
  23. pydantic_ai/messages.py +2 -2
  24. pydantic_ai/models/__init__.py +93 -9
  25. pydantic_ai/models/anthropic.py +16 -7
  26. pydantic_ai/models/bedrock.py +8 -5
  27. pydantic_ai/models/cohere.py +1 -4
  28. pydantic_ai/models/fallback.py +10 -3
  29. pydantic_ai/models/function.py +9 -4
  30. pydantic_ai/models/gemini.py +15 -9
  31. pydantic_ai/models/google.py +84 -20
  32. pydantic_ai/models/groq.py +17 -14
  33. pydantic_ai/models/huggingface.py +18 -12
  34. pydantic_ai/models/instrumented.py +3 -1
  35. pydantic_ai/models/mcp_sampling.py +3 -1
  36. pydantic_ai/models/mistral.py +12 -18
  37. pydantic_ai/models/openai.py +57 -30
  38. pydantic_ai/models/test.py +3 -0
  39. pydantic_ai/models/wrapper.py +6 -2
  40. pydantic_ai/profiles/openai.py +1 -1
  41. pydantic_ai/providers/google.py +7 -7
  42. pydantic_ai/result.py +21 -55
  43. pydantic_ai/run.py +357 -0
  44. pydantic_ai/tools.py +0 -1
  45. pydantic_ai/toolsets/__init__.py +2 -0
  46. pydantic_ai/toolsets/_dynamic.py +87 -0
  47. pydantic_ai/toolsets/abstract.py +23 -3
  48. pydantic_ai/toolsets/combined.py +19 -4
  49. pydantic_ai/toolsets/deferred.py +10 -2
  50. pydantic_ai/toolsets/function.py +23 -8
  51. pydantic_ai/toolsets/prefixed.py +4 -0
  52. pydantic_ai/toolsets/wrapper.py +14 -1
  53. pydantic_ai/usage.py +17 -1
  54. {pydantic_ai_slim-0.6.2.dist-info → pydantic_ai_slim-0.7.1.dist-info}/METADATA +7 -5
  55. {pydantic_ai_slim-0.6.2.dist-info → pydantic_ai_slim-0.7.1.dist-info}/RECORD +58 -45
  56. {pydantic_ai_slim-0.6.2.dist-info → pydantic_ai_slim-0.7.1.dist-info}/WHEEL +0 -0
  57. {pydantic_ai_slim-0.6.2.dist-info → pydantic_ai_slim-0.7.1.dist-info}/entry_points.txt +0 -0
  58. {pydantic_ai_slim-0.6.2.dist-info → pydantic_ai_slim-0.7.1.dist-info}/licenses/LICENSE +0 -0
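
The headline addition is the `pydantic_ai.durable_exec.temporal` package (items 11–19), which wraps an agent so that its model requests, function tool calls, and MCP traffic run as Temporal activities. A plausible sketch based on the module layout above, assuming `TemporalAgent` is the wrapper exported from the new package (consistent with `_agent.py`); the surrounding setup is not shown in this diff:

from pydantic_ai import Agent
from pydantic_ai.durable_exec.temporal import TemporalAgent  # assumed export

# A named agent; durable execution needs stable names to identify activities.
agent = Agent('openai:gpt-4o', name='assistant')

# Wrapping is assumed to be a one-liner: model requests, function tools and
# MCP calls then run as Temporal activities (_model.py, _function_toolset.py,
# _mcp_server.py above).
temporal_agent = TemporalAgent(agent)

The diff hunks shown below cover the model and provider layer.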
pydantic_ai/models/huggingface.py

@@ -5,16 +5,15 @@ from collections.abc import AsyncIterable, AsyncIterator
 from contextlib import asynccontextmanager
 from dataclasses import dataclass, field
 from datetime import datetime, timezone
-from typing import Literal, Union, cast, overload
+from typing import Any, Literal, Union, cast, overload

 from typing_extensions import assert_never

-from pydantic_ai._thinking_part import split_content_into_text_and_thinking
-from pydantic_ai.exceptions import UserError
-from pydantic_ai.providers import Provider, infer_provider
-
 from .. import ModelHTTPError, UnexpectedModelBehavior, _utils, usage
+from .._run_context import RunContext
+from .._thinking_part import split_content_into_text_and_thinking
 from .._utils import guard_tool_call_id as _guard_tool_call_id, now_utc as _now_utc
+from ..exceptions import UserError
 from ..messages import (
     AudioUrl,
     BinaryContent,
@@ -37,9 +36,15 @@ from ..messages import (
     VideoUrl,
 )
 from ..profiles import ModelProfile
+from ..providers import Provider, infer_provider
 from ..settings import ModelSettings
 from ..tools import ToolDefinition
-from . import Model, ModelRequestParameters, StreamedResponse, check_allow_model_requests
+from . import (
+    Model,
+    ModelRequestParameters,
+    StreamedResponse,
+    check_allow_model_requests,
+)

 try:
     import aiohttp
@@ -150,12 +155,13 @@ class HuggingFaceModel(Model):
         messages: list[ModelMessage],
         model_settings: ModelSettings | None,
         model_request_parameters: ModelRequestParameters,
+        run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
         check_allow_model_requests()
         response = await self._completions_create(
             messages, True, cast(HuggingFaceModelSettings, model_settings or {}), model_request_parameters
         )
-        yield await self._process_streamed_response(response)
+        yield await self._process_streamed_response(response, model_request_parameters)

     @property
     def model_name(self) -> HuggingFaceModelName:
@@ -263,7 +269,9 @@
             vendor_id=response.id,
         )

-    async def _process_streamed_response(self, response: AsyncIterable[ChatCompletionStreamOutput]) -> StreamedResponse:
+    async def _process_streamed_response(
+        self, response: AsyncIterable[ChatCompletionStreamOutput], model_request_parameters: ModelRequestParameters
+    ) -> StreamedResponse:
         """Process a streamed response, and prepare a streaming response to return."""
         peekable_response = _utils.PeekableAsyncStream(response)
         first_chunk = await peekable_response.peek()
@@ -273,6 +281,7 @@
         )

         return HuggingFaceStreamedResponse(
+            model_request_parameters=model_request_parameters,
             _model_name=self._model_name,
             _model_profile=self.profile,
             _response=peekable_response,
@@ -280,10 +289,7 @@
         )

     def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[ChatCompletionInputTool]:
-        tools = [self._map_tool_definition(r) for r in model_request_parameters.function_tools]
-        if model_request_parameters.output_tools:
-            tools += [self._map_tool_definition(r) for r in model_request_parameters.output_tools]
-        return tools
+        return [self._map_tool_definition(r) for r in model_request_parameters.tool_defs.values()]

     async def _map_messages(
         self, messages: list[ModelMessage]
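
A pattern repeated across every provider in this diff: `request_stream` gains an optional trailing `run_context` argument, and the `StreamedResponse` is now constructed with `model_request_parameters`. A minimal signature sketch for a custom `Model` subclass, with illustrative names and the remaining abstract members omitted:

from collections.abc import AsyncIterator
from contextlib import asynccontextmanager
from typing import Any

from pydantic_ai import RunContext
from pydantic_ai.messages import ModelMessage
from pydantic_ai.models import Model, ModelRequestParameters, StreamedResponse
from pydantic_ai.settings import ModelSettings


class MyStreamingModel(Model):  # illustrative; other abstract members omitted
    @asynccontextmanager
    async def request_stream(
        self,
        messages: list[ModelMessage],
        model_settings: ModelSettings | None,
        model_request_parameters: ModelRequestParameters,
        run_context: RunContext[Any] | None = None,  # new optional parameter in 0.7.x
    ) -> AsyncIterator[StreamedResponse]:
        # Real implementations thread model_request_parameters through to
        # their StreamedResponse, as the hunks above show.
        raise NotImplementedError('streaming not implemented in this sketch')
        yield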
pydantic_ai/models/instrumented.py

@@ -18,6 +18,7 @@ from opentelemetry.trace import Span, Tracer, TracerProvider, get_tracer_provider
 from opentelemetry.util.types import AttributeValue
 from pydantic import TypeAdapter

+from .._run_context import RunContext
 from ..messages import ModelMessage, ModelRequest, ModelResponse
 from ..settings import ModelSettings
 from . import KnownModelName, Model, ModelRequestParameters, StreamedResponse
@@ -218,12 +219,13 @@ class InstrumentedModel(WrapperModel):
         messages: list[ModelMessage],
         model_settings: ModelSettings | None,
         model_request_parameters: ModelRequestParameters,
+        run_context: RunContext[Any] | None = None,
    ) -> AsyncIterator[StreamedResponse]:
        with self._instrument(messages, model_settings, model_request_parameters) as finish:
            response_stream: StreamedResponse | None = None
            try:
                async with super().request_stream(
-                    messages, model_settings, model_request_parameters
+                    messages, model_settings, model_request_parameters, run_context
                ) as response_stream:
                    yield response_stream
            finally:
pydantic_ai/models/mcp_sampling.py

@@ -3,9 +3,10 @@ from __future__ import annotations as _annotations
 from collections.abc import AsyncIterator
 from contextlib import asynccontextmanager
 from dataclasses import dataclass
-from typing import TYPE_CHECKING, cast
+from typing import TYPE_CHECKING, Any, cast

 from .. import _mcp, exceptions, usage
+from .._run_context import RunContext
 from ..messages import ModelMessage, ModelResponse
 from ..settings import ModelSettings
 from . import Model, ModelRequestParameters, StreamedResponse
@@ -76,6 +77,7 @@ class MCPSamplingModel(Model):
         messages: list[ModelMessage],
         model_settings: ModelSettings | None,
         model_request_parameters: ModelRequestParameters,
+        run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
         raise NotImplementedError('MCP Sampling does not support streaming')
         yield
pydantic_ai/models/mistral.py

@@ -11,11 +11,11 @@ import pydantic_core
 from httpx import Timeout
 from typing_extensions import assert_never

-from pydantic_ai._thinking_part import split_content_into_text_and_thinking
-from pydantic_ai.exceptions import UserError
-
 from .. import ModelHTTPError, UnexpectedModelBehavior, _utils
+from .._run_context import RunContext
+from .._thinking_part import split_content_into_text_and_thinking
 from .._utils import generate_tool_call_id as _generate_tool_call_id, now_utc as _now_utc, number_to_datetime
+from ..exceptions import UserError
 from ..messages import (
     BinaryContent,
     BuiltinToolCallPart,
@@ -176,6 +176,7 @@ class MistralModel(Model):
         messages: list[ModelMessage],
         model_settings: ModelSettings | None,
         model_request_parameters: ModelRequestParameters,
+        run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
         """Make a streaming request to the model from Pydantic AI call."""
         check_allow_model_requests()
@@ -183,7 +184,7 @@
             messages, cast(MistralModelSettings, model_settings or {}), model_request_parameters
         )
         async with response:
-            yield await self._process_streamed_response(model_request_parameters.output_tools, response)
+            yield await self._process_streamed_response(response, model_request_parameters)

     @property
     def model_name(self) -> MistralModelName:
@@ -246,11 +247,7 @@
         if model_request_parameters.builtin_tools:
             raise UserError('Mistral does not support built-in tools')

-        if (
-            model_request_parameters.output_tools
-            and model_request_parameters.function_tools
-            or model_request_parameters.function_tools
-        ):
+        if model_request_parameters.function_tools:
             # Function Calling
             response = await self.client.chat.stream_async(
                 model=str(self._model_name),
@@ -318,16 +315,13 @@

         Returns None if both function_tools and output_tools are empty.
         """
-        all_tools: list[ToolDefinition] = (
-            model_request_parameters.function_tools + model_request_parameters.output_tools
-        )
         tools = [
             MistralTool(
                 function=MistralFunction(
                     name=r.name, parameters=r.parameters_json_schema, description=r.description or ''
                 )
             )
-            for r in all_tools
+            for r in model_request_parameters.tool_defs.values()
         ]
         return tools if tools else None

@@ -359,8 +353,8 @@

     async def _process_streamed_response(
         self,
-        output_tools: list[ToolDefinition],
         response: MistralEventStreamAsync[MistralCompletionEvent],
+        model_request_parameters: ModelRequestParameters,
     ) -> StreamedResponse:
         """Process a streamed response, and prepare a streaming response to return."""
         peekable_response = _utils.PeekableAsyncStream(response)
@@ -376,10 +370,10 @@
         timestamp = _now_utc()

         return MistralStreamedResponse(
+            model_request_parameters=model_request_parameters,
             _response=peekable_response,
             _model_name=self._model_name,
             _timestamp=timestamp,
-            _output_tools={c.name: c for c in output_tools},
         )

     @staticmethod
@@ -586,7 +580,6 @@ class MistralStreamedResponse(StreamedResponse):
     _model_name: MistralModelName
     _response: AsyncIterable[MistralCompletionEvent]
     _timestamp: datetime
-    _output_tools: dict[str, ToolDefinition]

     _delta_content: str = field(default='', init=False)

@@ -605,10 +598,11 @@
                 text = _map_content(content)
                 if text:
                     # Attempt to produce an output tool call from the received text
-                    if self._output_tools:
+                    output_tools = {c.name: c for c in self.model_request_parameters.output_tools}
+                    if output_tools:
                         self._delta_content += text
                         # TODO: Port to native "manual JSON" mode
-                        maybe_tool_call_part = self._try_get_output_tool_from_text(self._delta_content, self._output_tools)
+                        maybe_tool_call_part = self._try_get_output_tool_from_text(self._delta_content, output_tools)
                         if maybe_tool_call_part:
                             yield self._parts_manager.handle_tool_call_part(
                                 vendor_part_id='output',
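
These Mistral hunks follow the same consolidation as the other providers: rather than concatenating `function_tools` and `output_tools`, the code iterates the unified `model_request_parameters.tool_defs` mapping, keyed by tool name. A hedged sketch of the pattern, with `map_tool` standing in for a provider's `_map_tool_definition` and an illustrative wire format:

from pydantic_ai.models import ModelRequestParameters
from pydantic_ai.tools import ToolDefinition


def get_tools(params: ModelRequestParameters) -> list[dict[str, object]]:
    def map_tool(t: ToolDefinition) -> dict[str, object]:
        # Illustrative wire format; each provider has its own tool schema.
        return {
            'name': t.name,
            'description': t.description or '',
            'parameters': t.parameters_json_schema,
        }

    # tool_defs covers function tools *and* output tools in one mapping.
    return [map_tool(t) for t in params.tool_defs.values()]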
pydantic_ai/models/openai.py

@@ -11,13 +11,13 @@ from typing import Any, Literal, Union, cast, overload
 from pydantic import ValidationError
 from typing_extensions import assert_never

-from pydantic_ai.exceptions import UserError
-
 from .. import ModelHTTPError, UnexpectedModelBehavior, _utils, usage
 from .._output import DEFAULT_OUTPUT_TOOL_NAME, OutputObjectDefinition
+from .._run_context import RunContext
 from .._thinking_part import split_content_into_text_and_thinking
 from .._utils import guard_tool_call_id as _guard_tool_call_id, now_utc as _now_utc, number_to_datetime
 from ..builtin_tools import CodeExecutionTool, WebSearchTool
+from ..exceptions import UserError
 from ..messages import (
     AudioUrl,
     BinaryContent,
@@ -59,6 +59,11 @@ try:
     from openai.types.chat.chat_completion_content_part_image_param import ImageURL
     from openai.types.chat.chat_completion_content_part_input_audio_param import InputAudio
     from openai.types.chat.chat_completion_content_part_param import File, FileFile
+    from openai.types.chat.chat_completion_message_custom_tool_call import ChatCompletionMessageCustomToolCall
+    from openai.types.chat.chat_completion_message_function_tool_call import ChatCompletionMessageFunctionToolCall
+    from openai.types.chat.chat_completion_message_function_tool_call_param import (
+        ChatCompletionMessageFunctionToolCallParam,
+    )
     from openai.types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
     from openai.types.chat.completion_create_params import (
         WebSearchOptions,
@@ -172,6 +177,14 @@ class OpenAIResponsesModelSettings(OpenAIModelSettings, total=False):
     middle of the conversation.
     """

+    openai_text_verbosity: Literal['low', 'medium', 'high']
+    """Constrains the verbosity of the model's text response.
+
+    Lower values will result in more concise responses, while higher values will
+    result in more verbose responses. Currently supported values are `low`,
+    `medium`, and `high`.
+    """
+

 @dataclass(init=False)
 class OpenAIModel(Model):
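
The new setting is wired through in `_responses_create` further down, where it merges into the `text` request field. A hedged usage sketch; the model name and prompt are illustrative:

from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIResponsesModel, OpenAIResponsesModelSettings

# Ask the Responses API for terse output; 'gpt-5' is an illustrative model name.
agent = Agent(
    OpenAIResponsesModel('gpt-5'),
    model_settings=OpenAIResponsesModelSettings(openai_text_verbosity='low'),
)
result = agent.run_sync('Summarize the 0.7 release in one sentence.')
print(result.output)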
@@ -256,13 +269,14 @@
         messages: list[ModelMessage],
         model_settings: ModelSettings | None,
         model_request_parameters: ModelRequestParameters,
+        run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
         check_allow_model_requests()
         response = await self._completions_create(
             messages, True, cast(OpenAIModelSettings, model_settings or {}), model_request_parameters
         )
         async with response:
-            yield await self._process_streamed_response(response)
+            yield await self._process_streamed_response(response, model_request_parameters)

     @property
     def model_name(self) -> OpenAIModelName:
@@ -415,7 +429,14 @@
             items.extend(split_content_into_text_and_thinking(choice.message.content, self.profile.thinking_tags))
         if choice.message.tool_calls is not None:
             for c in choice.message.tool_calls:
-                part = ToolCallPart(c.function.name, c.function.arguments, tool_call_id=c.id)
+                if isinstance(c, ChatCompletionMessageFunctionToolCall):
+                    part = ToolCallPart(c.function.name, c.function.arguments, tool_call_id=c.id)
+                elif isinstance(c, ChatCompletionMessageCustomToolCall):  # pragma: no cover
+                    # NOTE: Custom tool calls are not supported.
+                    # See <https://github.com/pydantic/pydantic-ai/issues/2513> for more details.
+                    raise RuntimeError('Custom tool calls are not supported')
+                else:
+                    assert_never(c)
                 part.tool_call_id = _guard_tool_call_id(part)
                 items.append(part)
         return ModelResponse(
@@ -427,7 +448,9 @@
             vendor_id=response.id,
         )

-    async def _process_streamed_response(self, response: AsyncStream[ChatCompletionChunk]) -> OpenAIStreamedResponse:
+    async def _process_streamed_response(
+        self, response: AsyncStream[ChatCompletionChunk], model_request_parameters: ModelRequestParameters
+    ) -> OpenAIStreamedResponse:
         """Process a streamed response, and prepare a streaming response to return."""
         peekable_response = _utils.PeekableAsyncStream(response)
         first_chunk = await peekable_response.peek()
@@ -437,6 +460,7 @@
         )

         return OpenAIStreamedResponse(
+            model_request_parameters=model_request_parameters,
             _model_name=self._model_name,
             _model_profile=self.profile,
             _response=peekable_response,
@@ -444,10 +468,7 @@
         )

     def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[chat.ChatCompletionToolParam]:
-        tools = [self._map_tool_definition(r) for r in model_request_parameters.function_tools]
-        if model_request_parameters.output_tools:
-            tools += [self._map_tool_definition(r) for r in model_request_parameters.output_tools]
-        return tools
+        return [self._map_tool_definition(r) for r in model_request_parameters.tool_defs.values()]

     def _get_web_search_options(self, model_request_parameters: ModelRequestParameters) -> WebSearchOptions | None:
         for tool in model_request_parameters.builtin_tools:
@@ -461,8 +482,10 @@
                     ),
                 )
                 return WebSearchOptions(search_context_size=tool.search_context_size)
-            elif isinstance(tool, CodeExecutionTool):  # pragma: no branch
-                raise UserError('`CodeExecutionTool` is not supported by OpenAI')
+            else:
+                raise UserError(
+                    f'`{tool.__class__.__name__}` is not supported by `OpenAIModel`. If it should be, please file an issue.'
+                )

     async def _map_messages(self, messages: list[ModelMessage]) -> list[chat.ChatCompletionMessageParam]:
         """Just maps a `pydantic_ai.Message` to a `openai.types.ChatCompletionMessageParam`."""
@@ -473,7 +496,7 @@
                 openai_messages.append(item)
             elif isinstance(message, ModelResponse):
                 texts: list[str] = []
-                tool_calls: list[chat.ChatCompletionMessageToolCallParam] = []
+                tool_calls: list[ChatCompletionMessageFunctionToolCallParam] = []
                 for item in message.parts:
                     if isinstance(item, TextPart):
                         texts.append(item.content)
@@ -504,8 +527,8 @@
         return openai_messages

     @staticmethod
-    def _map_tool_call(t: ToolCallPart) -> chat.ChatCompletionMessageToolCallParam:
-        return chat.ChatCompletionMessageToolCallParam(
+    def _map_tool_call(t: ToolCallPart) -> ChatCompletionMessageFunctionToolCallParam:
+        return ChatCompletionMessageFunctionToolCallParam(
             id=_guard_tool_call_id(t=t),
             type='function',
             function={'name': t.tool_name, 'arguments': t.args_as_json_str()},
@@ -631,14 +654,6 @@
     The [OpenAI Responses API](https://platform.openai.com/docs/api-reference/responses) is the
     new API for OpenAI models.

-    The Responses API has built-in tools, that you can use instead of building your own:
-
-    - [Web search](https://platform.openai.com/docs/guides/tools-web-search)
-    - [File search](https://platform.openai.com/docs/guides/tools-file-search)
-    - [Computer use](https://platform.openai.com/docs/guides/tools-computer-use)
-
-    Use the `openai_builtin_tools` setting to add these tools to your model.
-
     If you are interested in the differences between the Responses API and the Chat Completions API,
     see the [OpenAI API docs](https://platform.openai.com/docs/guides/responses-vs-chat-completions).
     """
@@ -702,13 +717,14 @@
         messages: list[ModelMessage],
         model_settings: ModelSettings | None,
         model_request_parameters: ModelRequestParameters,
+        run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
         check_allow_model_requests()
         response = await self._responses_create(
             messages, True, cast(OpenAIResponsesModelSettings, model_settings or {}), model_request_parameters
         )
         async with response:
-            yield await self._process_streamed_response(response)
+            yield await self._process_streamed_response(response, model_request_parameters)

     def _process_response(self, response: responses.Response) -> ModelResponse:
         """Process a non-streamed response, and prepare a message to return."""
@@ -735,7 +751,9 @@
         )

     async def _process_streamed_response(
-        self, response: AsyncStream[responses.ResponseStreamEvent]
+        self,
+        response: AsyncStream[responses.ResponseStreamEvent],
+        model_request_parameters: ModelRequestParameters,
     ) -> OpenAIResponsesStreamedResponse:
         """Process a streamed response, and prepare a streaming response to return."""
         peekable_response = _utils.PeekableAsyncStream(response)
@@ -745,6 +763,7 @@

         assert isinstance(first_chunk, responses.ResponseCreatedEvent)
         return OpenAIResponsesStreamedResponse(
+            model_request_parameters=model_request_parameters,
             _model_name=self._model_name,
             _response=peekable_response,
             _timestamp=number_to_datetime(first_chunk.response.created_at),
@@ -775,8 +794,11 @@
         model_settings: OpenAIResponsesModelSettings,
         model_request_parameters: ModelRequestParameters,
     ) -> responses.Response | AsyncStream[responses.ResponseStreamEvent]:
-        tools = self._get_tools(model_request_parameters)
-        tools = self._get_builtin_tools(model_request_parameters) + tools
+        tools = (
+            self._get_builtin_tools(model_request_parameters)
+            + list(model_settings.get('openai_builtin_tools', []))
+            + self._get_tools(model_request_parameters)
+        )

         if not tools:
             tool_choice: Literal['none', 'required', 'auto'] | None = None
@@ -805,6 +827,10 @@
             openai_messages.insert(0, responses.EasyInputMessageParam(role='system', content=instructions))
             instructions = NOT_GIVEN

+        if verbosity := model_settings.get('openai_text_verbosity'):
+            text = text or {}
+            text['verbosity'] = verbosity
+
         sampling_settings = (
             model_settings
             if OpenAIModelProfile.from_profile(self.profile).openai_supports_sampling_settings
@@ -859,10 +885,7 @@
         return Reasoning(effort=reasoning_effort, summary=reasoning_summary)

     def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[responses.FunctionToolParam]:
-        tools = [self._map_tool_definition(r) for r in model_request_parameters.function_tools]
-        if model_request_parameters.output_tools:
-            tools += [self._map_tool_definition(r) for r in model_request_parameters.output_tools]
-        return tools
+        return [self._map_tool_definition(r) for r in model_request_parameters.tool_defs.values()]

     def _get_builtin_tools(self, model_request_parameters: ModelRequestParameters) -> list[responses.ToolParam]:
         tools: list[responses.ToolParam] = []
@@ -878,6 +901,10 @@
                 tools.append(web_search_tool)
             elif isinstance(tool, CodeExecutionTool):  # pragma: no branch
                 tools.append({'type': 'code_interpreter', 'container': {'type': 'auto'}})
+            else:
+                raise UserError(  # pragma: no cover
+                    f'`{tool.__class__.__name__}` is not supported by `OpenAIResponsesModel`. If it should be, please file an issue.'
+                )
         return tools

     def _map_tool_definition(self, f: ToolDefinition) -> responses.FunctionToolParam:
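
Both model classes now fail loudly on unsupported builtin tools instead of silently skipping them. A hedged sketch of the supported path; the model name and prompt are illustrative:

from pydantic_ai import Agent
from pydantic_ai.builtin_tools import WebSearchTool
from pydantic_ai.models.openai import OpenAIResponsesModel

# WebSearchTool and CodeExecutionTool map to native Responses API tools;
# any other builtin tool now raises UserError naming the tool class.
agent = Agent(OpenAIResponsesModel('gpt-4o'), builtin_tools=[WebSearchTool()])
result = agent.run_sync('Find the latest pydantic-ai release notes.')
print(result.output)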
pydantic_ai/models/test.py

@@ -12,6 +12,7 @@ import pydantic_core
 from typing_extensions import assert_never

 from .. import _utils
+from .._run_context import RunContext
 from ..exceptions import UserError
 from ..messages import (
     BuiltinToolCallPart,
@@ -121,11 +122,13 @@ class TestModel(Model):
         messages: list[ModelMessage],
         model_settings: ModelSettings | None,
         model_request_parameters: ModelRequestParameters,
+        run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
         self.last_model_request_parameters = model_request_parameters

         model_response = self._request(messages, model_settings, model_request_parameters)
         yield TestStreamedResponse(
+            model_request_parameters=model_request_parameters,
             _model_name=self._model_name,
             _structured_response=model_response,
             _messages=messages,
pydantic_ai/models/wrapper.py

@@ -6,6 +6,7 @@ from dataclasses import dataclass
 from functools import cached_property
 from typing import Any

+from .._run_context import RunContext
 from ..messages import ModelMessage, ModelResponse
 from ..profiles import ModelProfile
 from ..settings import ModelSettings
@@ -35,8 +36,11 @@ class WrapperModel(Model):
         messages: list[ModelMessage],
         model_settings: ModelSettings | None,
         model_request_parameters: ModelRequestParameters,
+        run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
-        async with self.wrapped.request_stream(messages, model_settings, model_request_parameters) as response_stream:
+        async with self.wrapped.request_stream(
+            messages, model_settings, model_request_parameters, run_context
+        ) as response_stream:
             yield response_stream

     def customize_request_parameters(self, model_request_parameters: ModelRequestParameters) -> ModelRequestParameters:
@@ -60,4 +64,4 @@ class WrapperModel(Model):
         return self.wrapped.settings

     def __getattr__(self, item: str):
-        return getattr(self.wrapped, item)  # pragma: no cover
+        return getattr(self.wrapped, item)
pydantic_ai/profiles/openai.py

@@ -32,7 +32,7 @@ class OpenAIModelProfile(ModelProfile):

 def openai_model_profile(model_name: str) -> ModelProfile:
     """Get the model profile for an OpenAI model."""
-    is_reasoning_model = model_name.startswith('o')
+    is_reasoning_model = model_name.startswith('o') or model_name.startswith('gpt-5')
     # Structured Outputs (output mode 'native') is only supported with the gpt-4o-mini, gpt-4o-mini-2024-07-18, and gpt-4o-2024-08-06 model snapshots and later.
     # We leave it in here for all models because the `default_structured_output_mode` is `'tool'`, so `native` is only used
     # when the user specifically uses the `NativeOutput` marker, so an error from the API is acceptable.
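
A quick way to observe the widened check; what `is_reasoning_model` feeds into is not shown in this hunk, so the loop below only demonstrates which names take the reasoning branch:

from pydantic_ai.profiles.openai import openai_model_profile

# o-series and now gpt-5-series names are treated as reasoning models;
# gpt-4o still is not.
for name in ('o3-mini', 'gpt-5', 'gpt-4o'):
    profile = openai_model_profile(name)
    print(name, type(profile).__name__)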
pydantic_ai/providers/google.py

@@ -12,8 +12,8 @@ from pydantic_ai.profiles.google import google_model_profile
 from pydantic_ai.providers import Provider

 try:
-    from google import genai
     from google.auth.credentials import Credentials
+    from google.genai import Client
     from google.genai.types import HttpOptionsDict
 except ImportError as _import_error:
     raise ImportError(
@@ -22,7 +22,7 @@ except ImportError as _import_error:
     ) from _import_error


-class GoogleProvider(Provider[genai.Client]):
+class GoogleProvider(Provider[Client]):
     """Provider for Google."""

     @property
@@ -34,7 +34,7 @@ class GoogleProvider(Provider[genai.Client]):
         return str(self._client._api_client._http_options.base_url)  # type: ignore[reportPrivateUsage]

     @property
-    def client(self) -> genai.Client:
+    def client(self) -> Client:
         return self._client

     def model_profile(self, model_name: str) -> ModelProfile | None:
@@ -53,7 +53,7 @@ class GoogleProvider(Provider[genai.Client]):
     ) -> None: ...

     @overload
-    def __init__(self, *, client: genai.Client) -> None: ...
+    def __init__(self, *, client: Client) -> None: ...

     @overload
     def __init__(self, *, vertexai: bool = False) -> None: ...
@@ -65,7 +65,7 @@ class GoogleProvider(Provider[genai.Client]):
         credentials: Credentials | None = None,
         project: str | None = None,
         location: VertexAILocation | Literal['global'] | None = None,
-        client: genai.Client | None = None,
+        client: Client | None = None,
         vertexai: bool | None = None,
     ) -> None:
         """Create a new Google provider.
@@ -102,9 +102,9 @@ class GoogleProvider(Provider[genai.Client]):
                 'Set the `GOOGLE_API_KEY` environment variable or pass it via `GoogleProvider(api_key=...)`'
                 'to use the Google Generative Language API.'
             )
-            self._client = genai.Client(vertexai=vertexai, api_key=api_key, http_options=http_options)
+            self._client = Client(vertexai=vertexai, api_key=api_key, http_options=http_options)
         else:
-            self._client = genai.Client(
+            self._client = Client(
                 vertexai=vertexai,
                 project=project or os.environ.get('GOOGLE_CLOUD_PROJECT'),
                 # From https://github.com/pydantic/pydantic-ai/pull/2031/files#r2169682149:
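
The overloads above still accept a pre-built client, now typed as `google.genai.Client`. A hedged usage sketch; the API key and model name are placeholders:

from google.genai import Client

from pydantic_ai.models.google import GoogleModel
from pydantic_ai.providers.google import GoogleProvider

# Reuse an existing google-genai client rather than letting the provider
# construct one; 'gemini-2.0-flash' is an illustrative model name.
client = Client(api_key='your-api-key')
model = GoogleModel('gemini-2.0-flash', provider=GoogleProvider(client=client))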