pydantic-ai-slim 0.3.4__tar.gz → 0.3.6__tar.gz

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.

Potentially problematic release: this version of pydantic-ai-slim was flagged as possibly problematic.
Files changed (81)
  1. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/PKG-INFO +4 -4
  2. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/_agent_graph.py +35 -7
  3. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/_output.py +1 -0
  4. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/_utils.py +16 -0
  5. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/agent.py +38 -28
  6. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/messages.py +44 -6
  7. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/fallback.py +2 -1
  8. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/openai.py +12 -1
  9. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/output.py +4 -0
  10. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/google.py +6 -1
  11. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/tools.py +20 -3
  12. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/.gitignore +0 -0
  13. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/LICENSE +0 -0
  14. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/README.md +0 -0
  15. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/__init__.py +0 -0
  16. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/__main__.py +0 -0
  17. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/_a2a.py +0 -0
  18. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/_cli.py +0 -0
  19. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/_function_schema.py +0 -0
  20. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/_griffe.py +0 -0
  21. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/_mcp.py +0 -0
  22. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/_parts_manager.py +0 -0
  23. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/_run_context.py +0 -0
  24. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/_system_prompt.py +0 -0
  25. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/_thinking_part.py +0 -0
  26. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/common_tools/__init__.py +0 -0
  27. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  28. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/common_tools/tavily.py +0 -0
  29. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/direct.py +0 -0
  30. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/exceptions.py +0 -0
  31. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/ext/__init__.py +0 -0
  32. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/ext/langchain.py +0 -0
  33. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/format_as_xml.py +0 -0
  34. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/format_prompt.py +0 -0
  35. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/mcp.py +0 -0
  36. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/__init__.py +0 -0
  37. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/anthropic.py +0 -0
  38. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/bedrock.py +0 -0
  39. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/cohere.py +0 -0
  40. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/function.py +0 -0
  41. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/gemini.py +0 -0
  42. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/google.py +0 -0
  43. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/groq.py +0 -0
  44. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/instrumented.py +0 -0
  45. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/mcp_sampling.py +0 -0
  46. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/mistral.py +0 -0
  47. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/test.py +0 -0
  48. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/wrapper.py +0 -0
  49. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/__init__.py +0 -0
  50. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/_json_schema.py +0 -0
  51. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/amazon.py +0 -0
  52. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/anthropic.py +0 -0
  53. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/cohere.py +0 -0
  54. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/deepseek.py +0 -0
  55. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/google.py +0 -0
  56. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/grok.py +0 -0
  57. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/meta.py +0 -0
  58. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/mistral.py +0 -0
  59. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/openai.py +0 -0
  60. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/qwen.py +0 -0
  61. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/__init__.py +0 -0
  62. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/anthropic.py +0 -0
  63. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/azure.py +0 -0
  64. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/bedrock.py +0 -0
  65. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/cohere.py +0 -0
  66. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/deepseek.py +0 -0
  67. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/fireworks.py +0 -0
  68. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/google_gla.py +0 -0
  69. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/google_vertex.py +0 -0
  70. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/grok.py +0 -0
  71. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/groq.py +0 -0
  72. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/heroku.py +0 -0
  73. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/mistral.py +0 -0
  74. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/openai.py +0 -0
  75. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/openrouter.py +0 -0
  76. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/together.py +0 -0
  77. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/py.typed +0 -0
  78. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/result.py +0 -0
  79. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/settings.py +0 -0
  80. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pydantic_ai/usage.py +0 -0
  81. {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.6}/pyproject.toml +0 -0
PKG-INFO:

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.3.4
+Version: 0.3.6
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
 License-Expression: MIT
@@ -30,11 +30,11 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.3.4
+Requires-Dist: pydantic-graph==0.3.6
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
-Requires-Dist: fasta2a==0.3.4; extra == 'a2a'
+Requires-Dist: fasta2a==0.3.6; extra == 'a2a'
 Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.52.0; extra == 'anthropic'
 Provides-Extra: bedrock
@@ -48,7 +48,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.3.4; extra == 'evals'
+Requires-Dist: pydantic-evals==0.3.6; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.15.0; extra == 'google'
 Provides-Extra: groq
pydantic_ai/_agent_graph.py:

@@ -641,7 +641,6 @@ async def process_function_tools(  # noqa C901
     run_context = build_run_context(ctx)

     calls_to_run: list[tuple[Tool[DepsT], _messages.ToolCallPart]] = []
-    call_index_to_event_id: dict[int, str] = {}
     for call in tool_calls:
         if (
             call.tool_name == output_tool_name
@@ -668,7 +667,6 @@ async def process_function_tools(  # noqa C901
             else:
                 event = _messages.FunctionToolCallEvent(call)
                 yield event
-                call_index_to_event_id[len(calls_to_run)] = event.call_id
             calls_to_run.append((tool, call))
         elif mcp_tool := await _tool_from_mcp_server(call.tool_name, ctx):
             if stub_function_tools:
@@ -683,7 +681,6 @@ async def process_function_tools(  # noqa C901
             else:
                 event = _messages.FunctionToolCallEvent(call)
                 yield event
-                call_index_to_event_id[len(calls_to_run)] = event.call_id
             calls_to_run.append((mcp_tool, call))
         elif call.tool_name in output_schema.tools:
             # if tool_name is in output_schema, it means we found a output tool but an error occurred in
@@ -700,13 +697,13 @@ async def process_function_tools(  # noqa C901
                 content=content,
                 tool_call_id=call.tool_call_id,
             )
-            yield _messages.FunctionToolResultEvent(part, tool_call_id=call.tool_call_id)
+            yield _messages.FunctionToolResultEvent(part)
             output_parts.append(part)
         else:
             yield _messages.FunctionToolCallEvent(call)

             part = _unknown_tool(call.tool_name, call.tool_call_id, ctx)
-            yield _messages.FunctionToolResultEvent(part, tool_call_id=call.tool_call_id)
+            yield _messages.FunctionToolResultEvent(part)
             output_parts.append(part)

     if not calls_to_run:
@@ -738,11 +735,35 @@ async def process_function_tools(  # noqa C901
         for task in done:
             index = tasks.index(task)
             result = task.result()
-            yield _messages.FunctionToolResultEvent(result, tool_call_id=call_index_to_event_id[index])
+            yield _messages.FunctionToolResultEvent(result)

             if isinstance(result, _messages.RetryPromptPart):
                 results_by_index[index] = result
             elif isinstance(result, _messages.ToolReturnPart):
+                if isinstance(result.content, _messages.ToolReturn):
+                    tool_return = result.content
+                    if (
+                        isinstance(tool_return.return_value, _messages.MultiModalContentTypes)
+                        or isinstance(tool_return.return_value, list)
+                        and any(
+                            isinstance(content, _messages.MultiModalContentTypes)
+                            for content in tool_return.return_value  # type: ignore
+                        )
+                    ):
+                        raise exceptions.UserError(
+                            f"{result.tool_name}'s `return_value` contains invalid nested MultiModalContentTypes objects. "
+                            f'Please use `content` instead.'
+                        )
+                    result.content = tool_return.return_value  # type: ignore
+                    result.metadata = tool_return.metadata
+                    if tool_return.content:
+                        user_parts.append(
+                            _messages.UserPromptPart(
+                                content=list(tool_return.content),
+                                timestamp=result.timestamp,
+                                part_kind='user-prompt',
+                            )
+                        )
                 contents: list[Any]
                 single_content: bool
                 if isinstance(result.content, list):
@@ -754,7 +775,13 @@ async def process_function_tools(  # noqa C901

                 processed_contents: list[Any] = []
                 for content in contents:
-                    if isinstance(content, _messages.MultiModalContentTypes):
+                    if isinstance(content, _messages.ToolReturn):
+                        raise exceptions.UserError(
+                            f"{result.tool_name}'s return contains invalid nested ToolReturn objects. "
+                            f'ToolReturn should be used directly.'
+                        )
+                    elif isinstance(content, _messages.MultiModalContentTypes):
+                        # Handle direct multimodal content
                         if isinstance(content, _messages.BinaryContent):
                             identifier = multi_modal_content_identifier(content.data)
                         else:
@@ -769,6 +796,7 @@ async def process_function_tools(  # noqa C901
                             )
                         processed_contents.append(f'See file {identifier}')
                     else:
+                        # Handle regular content
                        processed_contents.append(content)

                 if single_content:
pydantic_ai/_output.py:

@@ -182,6 +182,7 @@ class OutputSchema(BaseOutputSchema[OutputDataT], ABC):
                     _flatten_output_spec(output_spec.outputs),
                     name=output_spec.name,
                     description=output_spec.description,
+                    strict=output_spec.strict,
                 )
             )
         elif isinstance(output_spec, PromptedOutput):
pydantic_ai/_utils.py:

@@ -31,6 +31,8 @@ from typing_inspection.introspection import is_union_origin

 from pydantic_graph._utils import AbstractSpan

+from . import exceptions
+
 AbstractSpan = AbstractSpan

 if TYPE_CHECKING:
@@ -415,6 +417,20 @@ def merge_json_schema_defs(schemas: list[dict[str, Any]]) -> tuple[list[dict[str
     return rewritten_schemas, all_defs


+def validate_empty_kwargs(_kwargs: dict[str, Any]) -> None:
+    """Validate that no unknown kwargs remain after processing.
+
+    Args:
+        _kwargs: Dictionary of remaining kwargs after specific ones have been processed.
+
+    Raises:
+        UserError: If any unknown kwargs remain.
+    """
+    if _kwargs:
+        unknown_kwargs = ', '.join(f'`{k}`' for k in _kwargs.keys())
+        raise exceptions.UserError(f'Unknown keyword arguments: {unknown_kwargs}')
+
+
 def strip_markdown_fences(text: str) -> str:
     if text.startswith('{'):
         return text
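The new `validate_empty_kwargs` helper backs the stricter deprecated-kwarg handling in `agent.py` below: known deprecated kwargs are popped one by one, then anything left over is rejected. A minimal standalone sketch of the pattern; the `UserError` stand-in and `run` function here are illustrative, not library code:

```python
from typing import Any


class UserError(Exception):
    """Stand-in for pydantic_ai.exceptions.UserError."""


def validate_empty_kwargs(kwargs: dict[str, Any]) -> None:
    """Reject any kwargs left over after the known ones were popped."""
    if kwargs:
        unknown = ', '.join(f'`{k}`' for k in kwargs)
        raise UserError(f'Unknown keyword arguments: {unknown}')


def run(prompt: str, **deprecated_kwargs: Any) -> str:
    # Pop the deprecated kwargs we still accept...
    result_type = deprecated_kwargs.pop('result_type', None)
    # ...then anything remaining is a typo or an unsupported argument.
    validate_empty_kwargs(deprecated_kwargs)
    return f'{prompt!r} with result_type={result_type}'


print(run('hi', result_type=str))  # ok: deprecated but recognised
# run('hi', result_typo=str)       # UserError: Unknown keyword arguments: `result_typo`
```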
pydantic_ai/agent.py:

@@ -6,6 +6,7 @@ import json
 import warnings
 from collections.abc import AsyncIterator, Awaitable, Iterator, Sequence
 from contextlib import AbstractAsyncContextManager, AsyncExitStack, asynccontextmanager, contextmanager
+from contextvars import ContextVar
 from copy import deepcopy
 from types import FrameType
 from typing import TYPE_CHECKING, Any, Callable, ClassVar, Generic, cast, final, overload
@@ -157,8 +158,6 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
     _mcp_servers: Sequence[MCPServer] = dataclasses.field(repr=False)
     _default_retries: int = dataclasses.field(repr=False)
     _max_result_retries: int = dataclasses.field(repr=False)
-    _override_deps: _utils.Option[AgentDepsT] = dataclasses.field(default=None, repr=False)
-    _override_model: _utils.Option[models.Model] = dataclasses.field(default=None, repr=False)

     @overload
     def __init__(
@@ -294,11 +293,11 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         self.name = name
         self.model_settings = model_settings

-        if 'result_type' in _deprecated_kwargs:  # pragma: no cover
-            if output_type is not str:
+        if 'result_type' in _deprecated_kwargs:
+            if output_type is not str:  # pragma: no cover
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
             warnings.warn('`result_type` is deprecated, use `output_type` instead', DeprecationWarning)
-            output_type = _deprecated_kwargs['result_type']
+            output_type = _deprecated_kwargs.pop('result_type')

         self.output_type = output_type
@@ -306,22 +305,22 @@ class Agent(Generic[AgentDepsT, OutputDataT]):

         self._deps_type = deps_type

-        self._deprecated_result_tool_name = _deprecated_kwargs.get('result_tool_name')
-        if self._deprecated_result_tool_name is not None:  # pragma: no cover
+        self._deprecated_result_tool_name = _deprecated_kwargs.pop('result_tool_name', None)
+        if self._deprecated_result_tool_name is not None:
             warnings.warn(
                 '`result_tool_name` is deprecated, use `output_type` with `ToolOutput` instead',
                 DeprecationWarning,
             )

-        self._deprecated_result_tool_description = _deprecated_kwargs.get('result_tool_description')
-        if self._deprecated_result_tool_description is not None:  # pragma: no cover
+        self._deprecated_result_tool_description = _deprecated_kwargs.pop('result_tool_description', None)
+        if self._deprecated_result_tool_description is not None:
             warnings.warn(
                 '`result_tool_description` is deprecated, use `output_type` with `ToolOutput` instead',
                 DeprecationWarning,
             )
-        result_retries = _deprecated_kwargs.get('result_retries')
-        if result_retries is not None:  # pragma: no cover
-            if output_retries is not None:
+        result_retries = _deprecated_kwargs.pop('result_retries', None)
+        if result_retries is not None:
+            if output_retries is not None:  # pragma: no cover
                 raise TypeError('`output_retries` and `result_retries` cannot be set at the same time.')
             warnings.warn('`result_retries` is deprecated, use `max_result_retries` instead', DeprecationWarning)
             output_retries = result_retries
@@ -329,6 +328,8 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         default_output_mode = (
             self.model.profile.default_structured_output_mode if isinstance(self.model, models.Model) else None
         )
+        _utils.validate_empty_kwargs(_deprecated_kwargs)
+
         self._output_schema = _output.OutputSchema[OutputDataT].build(
             output_type,
             default_mode=default_output_mode,
@@ -365,6 +366,9 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         else:
             self._register_tool(Tool(tool))

+        self._override_deps: ContextVar[_utils.Option[AgentDepsT]] = ContextVar('_override_deps', default=None)
+        self._override_model: ContextVar[_utils.Option[models.Model]] = ContextVar('_override_model', default=None)
+
     @staticmethod
     def instrument_all(instrument: InstrumentationSettings | bool = True) -> None:
         """Set the instrumentation options for all agents where `instrument` is not set."""
@@ -469,7 +473,9 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
             warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
-            output_type = _deprecated_kwargs['result_type']
+            output_type = _deprecated_kwargs.pop('result_type')
+
+        _utils.validate_empty_kwargs(_deprecated_kwargs)

         async with self.iter(
             user_prompt=user_prompt,
@@ -635,7 +641,9 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
             warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
-            output_type = _deprecated_kwargs['result_type']
+            output_type = _deprecated_kwargs.pop('result_type')
+
+        _utils.validate_empty_kwargs(_deprecated_kwargs)

         deps = self._get_deps(deps)
         new_message_index = len(message_history) if message_history else 0
@@ -872,7 +880,9 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
             warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
-            output_type = _deprecated_kwargs['result_type']
+            output_type = _deprecated_kwargs.pop('result_type')
+
+        _utils.validate_empty_kwargs(_deprecated_kwargs)

         return get_event_loop().run_until_complete(
             self.run(
@@ -988,7 +998,9 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
             warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
-            output_type = _deprecated_kwargs['result_type']
+            output_type = _deprecated_kwargs.pop('result_type')
+
+        _utils.validate_empty_kwargs(_deprecated_kwargs)

         yielded = False
         async with self.iter(
@@ -1103,24 +1115,22 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             model: The model to use instead of the model passed to the agent run.
         """
         if _utils.is_set(deps):
-            override_deps_before = self._override_deps
-            self._override_deps = _utils.Some(deps)
+            deps_token = self._override_deps.set(_utils.Some(deps))
         else:
-            override_deps_before = _utils.UNSET
+            deps_token = None

         if _utils.is_set(model):
-            override_model_before = self._override_model
-            self._override_model = _utils.Some(models.infer_model(model))
+            model_token = self._override_model.set(_utils.Some(models.infer_model(model)))
         else:
-            override_model_before = _utils.UNSET
+            model_token = None

         try:
             yield
         finally:
-            if _utils.is_set(override_deps_before):
-                self._override_deps = override_deps_before
-            if _utils.is_set(override_model_before):
-                self._override_model = override_model_before
+            if deps_token is not None:
+                self._override_deps.reset(deps_token)
+            if model_token is not None:
+                self._override_model.reset(model_token)

     @overload
     def instructions(
@@ -1594,7 +1604,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             The model used
         """
         model_: models.Model
-        if some_model := self._override_model:
+        if some_model := self._override_model.get():
             # we don't want `override()` to cover up errors from the model not being defined, hence this check
             if model is None and self.model is None:
                 raise exceptions.UserError(
@@ -1623,7 +1633,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):

         We could do runtime type checking of deps against `self._deps_type`, but that's a slippery slope.
         """
-        if some_deps := self._override_deps:
+        if some_deps := self._override_deps.get():
             return some_deps.value
         else:
             return deps
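Moving `_override_deps`/`_override_model` from instance attributes to `ContextVar`s makes `Agent.override()` safe to use from concurrent asyncio tasks: each task sees only its own override. A self-contained sketch of the mechanism using a bare `ContextVar` (the names here are illustrative, not the library code):

```python
import asyncio
from contextlib import contextmanager
from contextvars import ContextVar

# Each asyncio task runs in its own context copy, so set()/reset() in one task
# is invisible to the others -- this is what makes Agent.override() task-safe.
_override_deps: ContextVar[str | None] = ContextVar('_override_deps', default=None)


@contextmanager
def override(deps: str):
    token = _override_deps.set(deps)
    try:
        yield
    finally:
        _override_deps.reset(token)  # restore exactly this caller's previous value


async def worker(name: str, deps: str) -> None:
    with override(deps):
        await asyncio.sleep(0.01)  # the other task runs (and overrides) here
        assert _override_deps.get() == deps, 'override leaked between tasks'
        print(f'{name} sees {_override_deps.get()}')


async def main() -> None:
    await asyncio.gather(worker('a', 'deps-a'), worker('b', 'deps-b'))


asyncio.run(main())
```

With plain instance attributes, the second task's save/restore could clobber the first's; `ContextVar.set()` returns a token that `reset()` uses to restore exactly the caller's prior state.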
pydantic_ai/messages.py:

@@ -11,7 +11,7 @@ from typing import TYPE_CHECKING, Annotated, Any, Literal, Union, cast, overload
 import pydantic
 import pydantic_core
 from opentelemetry._events import Event  # pyright: ignore[reportPrivateImportUsage]
-from typing_extensions import TypeAlias
+from typing_extensions import TypeAlias, deprecated

 from . import _utils
 from ._utils import (
@@ -306,6 +306,29 @@ class BinaryContent:

 UserContent: TypeAlias = 'str | ImageUrl | AudioUrl | DocumentUrl | VideoUrl | BinaryContent'

+
+@dataclass(repr=False)
+class ToolReturn:
+    """A structured return value for tools that need to provide both a return value and custom content to the model.
+
+    This class allows tools to return complex responses that include:
+    - A return value for actual tool return
+    - Custom content (including multi-modal content) to be sent to the model as a UserPromptPart
+    - Optional metadata for application use
+    """
+
+    return_value: Any
+    """The return value to be used in the tool response."""
+
+    content: Sequence[UserContent] | None = None
+    """The content sequence to be sent to the model as a UserPromptPart."""
+
+    metadata: Any = None
+    """Additional data that can be accessed programmatically by the application but is not sent to the LLM."""
+
+    __repr__ = _utils.dataclasses_no_defaults_repr
+
+
 # Ideally this would be a Union of types, but Python 3.9 requires it to be a string, and strings don't work with `isinstance``.
 MultiModalContentTypes = (ImageUrl, AudioUrl, DocumentUrl, VideoUrl, BinaryContent)
 _document_format_lookup: dict[str, DocumentFormat] = {
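A sketch of how a tool might use the new `ToolReturn`, based on the fields defined above; the model name and byte payload are placeholders:

```python
from pydantic_ai import Agent
from pydantic_ai.messages import BinaryContent, ToolReturn

agent = Agent('openai:gpt-4o')  # model name is illustrative


@agent.tool_plain
def take_screenshot() -> ToolReturn:
    png_bytes = b'\x89PNG\r\n\x1a\n...'  # placeholder image bytes
    return ToolReturn(
        return_value='Screenshot taken',  # goes into the ToolReturnPart sent to the model
        content=[  # sent separately to the model as a UserPromptPart; may be multimodal
            'Here is the screenshot:',
            BinaryContent(data=png_bytes, media_type='image/png'),
        ],
        metadata={'width': 1280, 'height': 720},  # app-only, never sent to the LLM
    )
```

Note the guard added in `_agent_graph.py` above: multi-modal objects must go in `content`, not `return_value`, or a `UserError` is raised.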
@@ -396,6 +419,9 @@ class ToolReturnPart:
     tool_call_id: str
     """The tool call identifier, this is used by some models including OpenAI."""

+    metadata: Any = None
+    """Additional data that can be accessed programmatically by the application but is not sent to the LLM."""
+
     timestamp: datetime = field(default_factory=_now_utc)
     """The timestamp, when the tool returned."""
@@ -475,7 +501,10 @@ class RetryPromptPart:
     def model_response(self) -> str:
         """Return a string message describing why the retry is requested."""
         if isinstance(self.content, str):
-            description = self.content
+            if self.tool_name is None:
+                description = f'Validation feedback:\n{self.content}'
+            else:
+                description = self.content
         else:
             json_errors = error_details_ta.dump_json(self.content, exclude={'__all__': {'ctx'}}, indent=2)
             description = f'{len(self.content)} validation errors: {json_errors.decode()}'
@@ -983,10 +1012,16 @@ class FunctionToolCallEvent:
     """Event type identifier, used as a discriminator."""

     @property
-    def call_id(self) -> str:
-        """An ID used for matching details about the call to its result. If present, defaults to the part's tool_call_id."""
+    def tool_call_id(self) -> str:
+        """An ID used for matching details about the call to its result."""
         return self.part.tool_call_id

+    @property
+    @deprecated('`call_id` is deprecated, use `tool_call_id` instead.')
+    def call_id(self) -> str:
+        """An ID used for matching details about the call to its result."""
+        return self.part.tool_call_id  # pragma: no cover
+
     __repr__ = _utils.dataclasses_no_defaults_repr

@@ -996,11 +1031,14 @@ class FunctionToolResultEvent:

     result: ToolReturnPart | RetryPromptPart
     """The result of the call to the function tool."""
-    tool_call_id: str
-    """An ID used to match the result to its original call."""
     event_kind: Literal['function_tool_result'] = 'function_tool_result'
     """Event type identifier, used as a discriminator."""

+    @property
+    def tool_call_id(self) -> str:
+        """An ID used to match the result to its original call."""
+        return self.result.tool_call_id
+
     __repr__ = _utils.dataclasses_no_defaults_repr
pydantic_ai/models/fallback.py:

@@ -87,10 +87,11 @@ class FallbackModel(Model):
         exceptions: list[Exception] = []

         for model in self.models:
+            customized_model_request_parameters = model.customize_request_parameters(model_request_parameters)
             async with AsyncExitStack() as stack:
                 try:
                     response = await stack.enter_async_context(
-                        model.request_stream(messages, model_settings, model_request_parameters)
+                        model.request_stream(messages, model_settings, customized_model_request_parameters)
                     )
                 except Exception as exc:
                     if self._fallback_on(exc):
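This fix matters when a fallback candidate needs to rewrite request parameters (for example, adapting tool or output schemas to the provider): previously only the non-streaming path applied `customize_request_parameters`. Typical usage is unchanged; a sketch with illustrative model names:

```python
import asyncio

from pydantic_ai import Agent
from pydantic_ai.models.fallback import FallbackModel

# Try OpenAI first; fall back to Anthropic if the request fails.
model = FallbackModel('openai:gpt-4o', 'anthropic:claude-3-5-sonnet-latest')
agent = Agent(model)


async def main() -> None:
    # The streaming path now also lets each candidate customize its request parameters.
    async with agent.run_stream('Tell me a one-line joke.') as stream:
        async for text in stream.stream_text():
            print(text)


asyncio.run(main())
```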
pydantic_ai/models/openai.py:

@@ -61,6 +61,7 @@ try:
     from openai.types.chat.chat_completion_content_part_image_param import ImageURL
     from openai.types.chat.chat_completion_content_part_input_audio_param import InputAudio
     from openai.types.chat.chat_completion_content_part_param import File, FileFile
+    from openai.types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
     from openai.types.responses import ComputerToolParam, FileSearchToolParam, WebSearchToolParam
     from openai.types.responses.response_input_param import FunctionCallOutput, Message
     from openai.types.shared import ReasoningEffort
@@ -126,6 +127,12 @@ class OpenAIModelSettings(ModelSettings, total=False):
     For more information, see [OpenAI's service tiers documentation](https://platform.openai.com/docs/api-reference/chat/object#chat/object-service_tier).
     """

+    openai_prediction: ChatCompletionPredictionContentParam
+    """Enables [predictive outputs](https://platform.openai.com/docs/guides/predicted-outputs).
+
+    This feature is currently only supported for some OpenAI models.
+    """
+

 class OpenAIResponsesModelSettings(OpenAIModelSettings, total=False):
     """Settings used for an OpenAI Responses model request.
@@ -320,6 +327,7 @@ class OpenAIModel(Model):
                 reasoning_effort=model_settings.get('openai_reasoning_effort', NOT_GIVEN),
                 user=model_settings.get('openai_user', NOT_GIVEN),
                 service_tier=model_settings.get('openai_service_tier', NOT_GIVEN),
+                prediction=model_settings.get('openai_prediction', NOT_GIVEN),
                 temperature=sampling_settings.get('temperature', NOT_GIVEN),
                 top_p=sampling_settings.get('top_p', NOT_GIVEN),
                 presence_penalty=sampling_settings.get('presence_penalty', NOT_GIVEN),
@@ -644,13 +652,16 @@ class OpenAIResponsesModel(Model):
         """Process a non-streamed response, and prepare a message to return."""
         timestamp = number_to_datetime(response.created_at)
         items: list[ModelResponsePart] = []
-        items.append(TextPart(response.output_text))
         for item in response.output:
             if item.type == 'reasoning':
                 for summary in item.summary:
                     # NOTE: We use the same id for all summaries because we can merge them on the round trip.
                     # The providers don't force the signature to be unique.
                     items.append(ThinkingPart(content=summary.text, id=item.id))
+            elif item.type == 'message':
+                for content in item.content:
+                    if content.type == 'output_text':  # pragma: no branch
+                        items.append(TextPart(content.text))
             elif item.type == 'function_call':
                 items.append(ToolCallPart(item.name, item.arguments, tool_call_id=item.call_id))
         return ModelResponse(
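The new `openai_prediction` setting plumbs OpenAI's predicted outputs through `OpenAIModelSettings`. A sketch of usage, assuming an OpenAI API key is configured in the environment; the model name is illustrative:

```python
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIModelSettings

# Predicted outputs: supply the text the answer is expected to largely repeat,
# so the API can skip regenerating unchanged spans.
code = 'def add(a, b):\n    return a + b\n'
settings = OpenAIModelSettings(openai_prediction={'type': 'content', 'content': code})

agent = Agent('openai:gpt-4o', model_settings=settings)
result = agent.run_sync(f'Rename the function to `sum_two` and return the full file:\n\n{code}')
print(result.output)
```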
pydantic_ai/output.py:

@@ -154,6 +154,8 @@ class NativeOutput(Generic[OutputDataT]):
     """The name of the structured output that will be passed to the model. If not specified and only one output is provided, the name of the output type or function will be used."""
     description: str | None
     """The description of the structured output that will be passed to the model. If not specified and only one output is provided, the docstring of the output type or function will be used."""
+    strict: bool | None
+    """Whether to use strict mode for the output, if the model supports it."""

     def __init__(
         self,
@@ -161,10 +163,12 @@ class NativeOutput(Generic[OutputDataT]):
         *,
         name: str | None = None,
         description: str | None = None,
+        strict: bool | None = None,
     ):
         self.outputs = outputs
         self.name = name
         self.description = description
+        self.strict = strict


 @dataclass(init=False)
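With `strict` now threaded from `NativeOutput` into the output schema (see the `_output.py` hunk above), strict structured output can be requested per output spec. A sketch, with an illustrative model name:

```python
from pydantic import BaseModel

from pydantic_ai import Agent
from pydantic_ai.output import NativeOutput


class CityInfo(BaseModel):
    city: str
    country: str


# strict=True asks the provider for strict schema adherence where supported
# (e.g. OpenAI strict mode); None leaves the choice to the model profile.
agent = Agent(
    'openai:gpt-4o',
    output_type=NativeOutput(CityInfo, name='city_info', strict=True),
)

result = agent.run_sync('What is the largest city in France?')
print(result.output)  # e.g. CityInfo(city='Paris', country='France')
```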
pydantic_ai/providers/google.py:

@@ -104,7 +104,12 @@ class GoogleProvider(Provider[genai.Client]):
         self._client = genai.Client(
             vertexai=vertexai,
             project=project or os.environ.get('GOOGLE_CLOUD_PROJECT'),
-            location=location or os.environ.get('GOOGLE_CLOUD_LOCATION'),
+            # From https://github.com/pydantic/pydantic-ai/pull/2031/files#r2169682149:
+            # Currently `us-central1` supports the most models by far of any region including `global`, but not
+            # all of them. `us-central1` has all google models but is missing some Anthropic partner models,
+            # which use `us-east5` instead. `global` has fewer models but higher availability.
+            # For more details, check: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations#available-regions
+            location=location or os.environ.get('GOOGLE_CLOUD_LOCATION') or 'us-central1',
             credentials=credentials,
             http_options={'headers': {'User-Agent': get_user_agent()}},
         )
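The provider now falls back to `us-central1` when neither the `location` argument nor `GOOGLE_CLOUD_LOCATION` is set. To pin a different region, for example for Anthropic partner models hosted in `us-east5`, pass it explicitly; a sketch with an illustrative project ID and model name:

```python
from pydantic_ai import Agent
from pydantic_ai.models.google import GoogleModel
from pydantic_ai.providers.google import GoogleProvider

# Explicit location overrides both GOOGLE_CLOUD_LOCATION and the new default.
provider = GoogleProvider(vertexai=True, project='my-gcp-project', location='us-east5')
model = GoogleModel('gemini-2.0-flash', provider=provider)
agent = Agent(model)
```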
pydantic_ai/tools.py:

@@ -32,6 +32,7 @@ __all__ = (
     'ToolDefinition',
 )

+from .messages import ToolReturnPart

 ToolParams = ParamSpec('ToolParams', default=...)
 """Retrieval function param spec."""
@@ -346,15 +347,31 @@ class Tool(Generic[AgentDepsT]):
                     {
                         'type': 'object',
                         'properties': {
-                            **({'tool_arguments': {'type': 'object'}} if include_content else {}),
+                            **(
+                                {
+                                    'tool_arguments': {'type': 'object'},
+                                    'tool_response': {'type': 'object'},
+                                }
+                                if include_content
+                                else {}
+                            ),
                             'gen_ai.tool.name': {},
                             'gen_ai.tool.call.id': {},
                         },
                     }
                 ),
         }
-        with tracer.start_as_current_span('running tool', attributes=span_attributes):
-            return await self._run(message, run_context)
+        with tracer.start_as_current_span('running tool', attributes=span_attributes) as span:
+            response = await self._run(message, run_context)
+            if include_content and span.is_recording():
+                span.set_attribute(
+                    'tool_response',
+                    response.model_response_str()
+                    if isinstance(response, ToolReturnPart)
+                    else response.model_response(),
+                )
+
+            return response

     async def _run(
         self, message: _messages.ToolCallPart, run_context: RunContext[AgentDepsT]
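The tool span now records a `tool_response` attribute alongside `tool_arguments` when content capture is enabled. A sketch, assuming the `include_content` flag checked above is driven by `InstrumentationSettings(include_content=True)`; the model name is illustrative:

```python
from pydantic_ai import Agent
from pydantic_ai.models.instrumented import InstrumentationSettings

# Assumption: include_content=True on InstrumentationSettings feeds the
# include_content flag in Tool.run, so the tool span carries both the
# tool_arguments and the new tool_response attributes.
agent = Agent(
    'openai:gpt-4o',
    instrument=InstrumentationSettings(include_content=True),
)


@agent.tool_plain
def add(a: int, b: int) -> int:
    return a + b  # the span's tool_response attribute would hold '3' for add(1, 2)
```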