pydantic-ai-slim 0.3.3__tar.gz → 0.3.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pydantic-ai-slim might be problematic.

Files changed (81)
  1. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/PKG-INFO +4 -4
  2. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/_agent_graph.py +39 -2
  3. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/_output.py +1 -0
  4. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/_utils.py +16 -0
  5. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/agent.py +25 -14
  6. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/messages.py +46 -11
  7. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/anthropic.py +7 -9
  8. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/instrumented.py +5 -1
  9. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/openai.py +4 -1
  10. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/output.py +4 -0
  11. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/google.py +6 -1
  12. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/tools.py +3 -2
  13. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/.gitignore +0 -0
  14. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/LICENSE +0 -0
  15. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/README.md +0 -0
  16. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/__init__.py +0 -0
  17. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/__main__.py +0 -0
  18. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/_a2a.py +0 -0
  19. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/_cli.py +0 -0
  20. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/_function_schema.py +0 -0
  21. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/_griffe.py +0 -0
  22. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/_mcp.py +0 -0
  23. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/_parts_manager.py +0 -0
  24. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/_run_context.py +0 -0
  25. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/_system_prompt.py +0 -0
  26. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/_thinking_part.py +0 -0
  27. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/common_tools/__init__.py +0 -0
  28. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  29. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/common_tools/tavily.py +0 -0
  30. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/direct.py +0 -0
  31. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/exceptions.py +0 -0
  32. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/ext/__init__.py +0 -0
  33. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/ext/langchain.py +0 -0
  34. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/format_as_xml.py +0 -0
  35. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/format_prompt.py +0 -0
  36. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/mcp.py +0 -0
  37. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/__init__.py +0 -0
  38. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/bedrock.py +0 -0
  39. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/cohere.py +0 -0
  40. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/fallback.py +0 -0
  41. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/function.py +0 -0
  42. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/gemini.py +0 -0
  43. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/google.py +0 -0
  44. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/groq.py +0 -0
  45. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/mcp_sampling.py +0 -0
  46. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/mistral.py +0 -0
  47. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/test.py +0 -0
  48. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/wrapper.py +0 -0
  49. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/__init__.py +0 -0
  50. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/_json_schema.py +0 -0
  51. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/amazon.py +0 -0
  52. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/anthropic.py +0 -0
  53. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/cohere.py +0 -0
  54. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/deepseek.py +0 -0
  55. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/google.py +0 -0
  56. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/grok.py +0 -0
  57. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/meta.py +0 -0
  58. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/mistral.py +0 -0
  59. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/openai.py +0 -0
  60. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/qwen.py +0 -0
  61. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/__init__.py +0 -0
  62. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/anthropic.py +0 -0
  63. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/azure.py +0 -0
  64. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/bedrock.py +0 -0
  65. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/cohere.py +0 -0
  66. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/deepseek.py +0 -0
  67. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/fireworks.py +0 -0
  68. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/google_gla.py +0 -0
  69. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/google_vertex.py +0 -0
  70. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/grok.py +0 -0
  71. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/groq.py +0 -0
  72. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/heroku.py +0 -0
  73. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/mistral.py +0 -0
  74. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/openai.py +0 -0
  75. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/openrouter.py +0 -0
  76. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/together.py +0 -0
  77. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/py.typed +0 -0
  78. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/result.py +0 -0
  79. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/settings.py +0 -0
  80. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pydantic_ai/usage.py +0 -0
  81. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.5}/pyproject.toml +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.3.3
+Version: 0.3.5
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
 License-Expression: MIT
@@ -30,11 +30,11 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.3.3
+Requires-Dist: pydantic-graph==0.3.5
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
-Requires-Dist: fasta2a==0.3.3; extra == 'a2a'
+Requires-Dist: fasta2a==0.3.5; extra == 'a2a'
 Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.52.0; extra == 'anthropic'
 Provides-Extra: bedrock
@@ -48,7 +48,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.3.3; extra == 'evals'
+Requires-Dist: pydantic-evals==0.3.5; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.15.0; extra == 'google'
 Provides-Extra: groq

pydantic_ai/_agent_graph.py

@@ -24,6 +24,7 @@ from .tools import RunContext, Tool, ToolDefinition, ToolsPrepareFunc
 
 if TYPE_CHECKING:
     from .mcp import MCPServer
+    from .models.instrumented import InstrumentationSettings
 
 __all__ = (
     'GraphAgentState',
@@ -112,6 +113,7 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]):
     default_retries: int
 
     tracer: Tracer
+    instrumentation_settings: InstrumentationSettings | None = None
 
     prepare_tools: ToolsPrepareFunc[DepsT] | None = None
 
@@ -712,6 +714,10 @@ async def process_function_tools(  # noqa C901
 
     user_parts: list[_messages.UserPromptPart] = []
 
+    include_content = (
+        ctx.deps.instrumentation_settings is not None and ctx.deps.instrumentation_settings.include_content
+    )
+
     # Run all tool tasks in parallel
     results_by_index: dict[int, _messages.ModelRequestPart] = {}
     with ctx.deps.tracer.start_as_current_span(
@@ -722,7 +728,7 @@ async def process_function_tools(  # noqa C901
         },
     ):
         tasks = [
-            asyncio.create_task(tool.run(call, run_context, ctx.deps.tracer), name=call.tool_name)
+            asyncio.create_task(tool.run(call, run_context, ctx.deps.tracer, include_content), name=call.tool_name)
             for tool, call in calls_to_run
         ]
 
@@ -737,6 +743,30 @@ async def process_function_tools(  # noqa C901
         if isinstance(result, _messages.RetryPromptPart):
             results_by_index[index] = result
         elif isinstance(result, _messages.ToolReturnPart):
+            if isinstance(result.content, _messages.ToolReturn):
+                tool_return = result.content
+                if (
+                    isinstance(tool_return.return_value, _messages.MultiModalContentTypes)
+                    or isinstance(tool_return.return_value, list)
+                    and any(
+                        isinstance(content, _messages.MultiModalContentTypes)
+                        for content in tool_return.return_value  # type: ignore
+                    )
+                ):
+                    raise exceptions.UserError(
+                        f"{result.tool_name}'s `return_value` contains invalid nested MultiModalContentTypes objects. "
+                        f'Please use `content` instead.'
+                    )
+                result.content = tool_return.return_value  # type: ignore
+                result.metadata = tool_return.metadata
+                if tool_return.content:
+                    user_parts.append(
+                        _messages.UserPromptPart(
+                            content=list(tool_return.content),
+                            timestamp=result.timestamp,
+                            part_kind='user-prompt',
+                        )
+                    )
             contents: list[Any]
             single_content: bool
             if isinstance(result.content, list):
@@ -748,7 +778,13 @@ async def process_function_tools(  # noqa C901
 
             processed_contents: list[Any] = []
             for content in contents:
-                if isinstance(content, _messages.MultiModalContentTypes):
+                if isinstance(content, _messages.ToolReturn):
+                    raise exceptions.UserError(
+                        f"{result.tool_name}'s return contains invalid nested ToolReturn objects. "
+                        f'ToolReturn should be used directly.'
+                    )
+                elif isinstance(content, _messages.MultiModalContentTypes):
+                    # Handle direct multimodal content
                     if isinstance(content, _messages.BinaryContent):
                         identifier = multi_modal_content_identifier(content.data)
                     else:
@@ -763,6 +799,7 @@ async def process_function_tools(  # noqa C901
                     )
                     processed_contents.append(f'See file {identifier}')
                 else:
+                    # Handle regular content
                    processed_contents.append(content)
 
             if single_content:
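
Note on the `ToolReturn` handling added above: multimodal objects may only appear in a `ToolReturn`'s `content`, never in its `return_value` (directly or inside a list), and a `ToolReturn` must not be nested inside another return. A minimal sketch of the intended shape, using a hypothetical `take_screenshot` tool:

    from pydantic_ai.messages import BinaryContent, ToolReturn

    def take_screenshot() -> ToolReturn:
        png = b'...'  # placeholder for raw image bytes
        return ToolReturn(
            return_value='screenshot taken',  # plain value recorded as the tool result
            content=['Here is the screenshot:', BinaryContent(data=png, media_type='image/png')],
            metadata={'width': 1920, 'height': 1080},  # app-only, not sent to the model
        )

    # Raises UserError in 0.3.5: multimodal content inside `return_value`.
    # ToolReturn(return_value=BinaryContent(data=b'...', media_type='image/png'))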

pydantic_ai/_output.py

@@ -182,6 +182,7 @@ class OutputSchema(BaseOutputSchema[OutputDataT], ABC):
                     _flatten_output_spec(output_spec.outputs),
                     name=output_spec.name,
                     description=output_spec.description,
+                    strict=output_spec.strict,
                 )
             )
         elif isinstance(output_spec, PromptedOutput):

pydantic_ai/_utils.py

@@ -31,6 +31,8 @@ from typing_inspection.introspection import is_union_origin
 
 from pydantic_graph._utils import AbstractSpan
 
+from . import exceptions
+
 AbstractSpan = AbstractSpan
 
 if TYPE_CHECKING:
@@ -415,6 +417,20 @@ def merge_json_schema_defs(schemas: list[dict[str, Any]]) -> tuple[list[dict[str
     return rewritten_schemas, all_defs
 
 
+def validate_empty_kwargs(_kwargs: dict[str, Any]) -> None:
+    """Validate that no unknown kwargs remain after processing.
+
+    Args:
+        _kwargs: Dictionary of remaining kwargs after specific ones have been processed.
+
+    Raises:
+        UserError: If any unknown kwargs remain.
+    """
+    if _kwargs:
+        unknown_kwargs = ', '.join(f'`{k}`' for k in _kwargs.keys())
+        raise exceptions.UserError(f'Unknown keyword arguments: {unknown_kwargs}')
+
+
 def strip_markdown_fences(text: str) -> str:
     if text.startswith('{'):
         return text
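
The new internal helper is what turns silently-ignored leftover kwargs into a hard error; its behavior in isolation (a sketch against the private `_utils` module):

    from pydantic_ai import _utils

    _utils.validate_empty_kwargs({})  # fine, nothing left over
    _utils.validate_empty_kwargs({'result_typo': str})
    # -> UserError: Unknown keyword arguments: `result_typo`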

pydantic_ai/agent.py

@@ -294,11 +294,11 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         self.name = name
         self.model_settings = model_settings
 
-        if 'result_type' in _deprecated_kwargs:  # pragma: no cover
-            if output_type is not str:
+        if 'result_type' in _deprecated_kwargs:
+            if output_type is not str:  # pragma: no cover
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
             warnings.warn('`result_type` is deprecated, use `output_type` instead', DeprecationWarning)
-            output_type = _deprecated_kwargs['result_type']
+            output_type = _deprecated_kwargs.pop('result_type')
 
         self.output_type = output_type
 
@@ -306,22 +306,22 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
 
         self._deps_type = deps_type
 
-        self._deprecated_result_tool_name = _deprecated_kwargs.get('result_tool_name')
-        if self._deprecated_result_tool_name is not None:  # pragma: no cover
+        self._deprecated_result_tool_name = _deprecated_kwargs.pop('result_tool_name', None)
+        if self._deprecated_result_tool_name is not None:
             warnings.warn(
                 '`result_tool_name` is deprecated, use `output_type` with `ToolOutput` instead',
                 DeprecationWarning,
             )
 
-        self._deprecated_result_tool_description = _deprecated_kwargs.get('result_tool_description')
-        if self._deprecated_result_tool_description is not None:  # pragma: no cover
+        self._deprecated_result_tool_description = _deprecated_kwargs.pop('result_tool_description', None)
+        if self._deprecated_result_tool_description is not None:
             warnings.warn(
                 '`result_tool_description` is deprecated, use `output_type` with `ToolOutput` instead',
                 DeprecationWarning,
             )
-        result_retries = _deprecated_kwargs.get('result_retries')
-        if result_retries is not None:  # pragma: no cover
-            if output_retries is not None:
+        result_retries = _deprecated_kwargs.pop('result_retries', None)
+        if result_retries is not None:
+            if output_retries is not None:  # pragma: no cover
                 raise TypeError('`output_retries` and `result_retries` cannot be set at the same time.')
             warnings.warn('`result_retries` is deprecated, use `max_result_retries` instead', DeprecationWarning)
             output_retries = result_retries
@@ -329,6 +329,8 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         default_output_mode = (
             self.model.profile.default_structured_output_mode if isinstance(self.model, models.Model) else None
         )
+        _utils.validate_empty_kwargs(_deprecated_kwargs)
+
         self._output_schema = _output.OutputSchema[OutputDataT].build(
             output_type,
             default_mode=default_output_mode,
@@ -469,7 +471,9 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
             warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
-            output_type = _deprecated_kwargs['result_type']
+            output_type = _deprecated_kwargs.pop('result_type')
+
+        _utils.validate_empty_kwargs(_deprecated_kwargs)
 
         async with self.iter(
             user_prompt=user_prompt,
@@ -635,7 +639,9 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
             warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
-            output_type = _deprecated_kwargs['result_type']
+            output_type = _deprecated_kwargs.pop('result_type')
+
+        _utils.validate_empty_kwargs(_deprecated_kwargs)
 
         deps = self._get_deps(deps)
         new_message_index = len(message_history) if message_history else 0
@@ -719,6 +725,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             tracer=tracer,
             prepare_tools=self._prepare_tools,
             get_instructions=get_instructions,
+            instrumentation_settings=instrumentation_settings,
         )
         start_node = _agent_graph.UserPromptNode[AgentDepsT](
             user_prompt=user_prompt,
@@ -871,7 +878,9 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
             warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
-            output_type = _deprecated_kwargs['result_type']
+            output_type = _deprecated_kwargs.pop('result_type')
+
+        _utils.validate_empty_kwargs(_deprecated_kwargs)
 
         return get_event_loop().run_until_complete(
             self.run(
@@ -987,7 +996,9 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
             warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
-            output_type = _deprecated_kwargs['result_type']
+            output_type = _deprecated_kwargs.pop('result_type')
+
+        _utils.validate_empty_kwargs(_deprecated_kwargs)
 
         yielded = False
         async with self.iter(
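
Net effect of the agent.py changes: the deprecated `result_*` kwargs are now popped instead of peeked, and `validate_empty_kwargs` rejects whatever remains, so a typo'd keyword fails loudly instead of being silently ignored. A sketch using the built-in 'test' model:

    import warnings

    from pydantic_ai import Agent

    with warnings.catch_warnings(record=True):
        warnings.simplefilter('always')
        agent = Agent('test', result_type=str)  # DeprecationWarning, but still works

    Agent('test', result_typo=str)  # 0.3.5: UserError: Unknown keyword arguments: `result_typo`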

pydantic_ai/messages.py

@@ -76,8 +76,11 @@ class SystemPromptPart:
     part_kind: Literal['system-prompt'] = 'system-prompt'
     """Part type identifier, this is available on all parts as a discriminator."""
 
-    def otel_event(self, _settings: InstrumentationSettings) -> Event:
-        return Event('gen_ai.system.message', body={'content': self.content, 'role': 'system'})
+    def otel_event(self, settings: InstrumentationSettings) -> Event:
+        return Event(
+            'gen_ai.system.message',
+            body={'role': 'system', **({'content': self.content} if settings.include_content else {})},
+        )
 
     __repr__ = _utils.dataclasses_no_defaults_repr
 
@@ -303,6 +306,29 @@ class BinaryContent:
 
 UserContent: TypeAlias = 'str | ImageUrl | AudioUrl | DocumentUrl | VideoUrl | BinaryContent'
 
+
+@dataclass(repr=False)
+class ToolReturn:
+    """A structured return value for tools that need to provide both a return value and custom content to the model.
+
+    This class allows tools to return complex responses that include:
+    - A return value for actual tool return
+    - Custom content (including multi-modal content) to be sent to the model as a UserPromptPart
+    - Optional metadata for application use
+    """
+
+    return_value: Any
+    """The return value to be used in the tool response."""
+
+    content: Sequence[UserContent] | None = None
+    """The content sequence to be sent to the model as a UserPromptPart."""
+
+    metadata: Any = None
+    """Additional data that can be accessed programmatically by the application but is not sent to the LLM."""
+
+    __repr__ = _utils.dataclasses_no_defaults_repr
+
+
 # Ideally this would be a Union of types, but Python 3.9 requires it to be a string, and strings don't work with `isinstance``.
 MultiModalContentTypes = (ImageUrl, AudioUrl, DocumentUrl, VideoUrl, BinaryContent)
 _document_format_lookup: dict[str, DocumentFormat] = {
@@ -362,12 +388,12 @@ class UserPromptPart:
             content = []
             for part in self.content:
                 if isinstance(part, str):
-                    content.append(part)
+                    content.append(part if settings.include_content else {'kind': 'text'})
                 elif isinstance(part, (ImageUrl, AudioUrl, DocumentUrl, VideoUrl)):
-                    content.append({'kind': part.kind, 'url': part.url})
+                    content.append({'kind': part.kind, **({'url': part.url} if settings.include_content else {})})
                 elif isinstance(part, BinaryContent):
                     converted_part = {'kind': part.kind, 'media_type': part.media_type}
-                    if settings.include_binary_content:
+                    if settings.include_content and settings.include_binary_content:
                         converted_part['binary_content'] = base64.b64encode(part.data).decode()
                     content.append(converted_part)
                 else:
@@ -393,6 +419,9 @@ class ToolReturnPart:
     tool_call_id: str
     """The tool call identifier, this is used by some models including OpenAI."""
 
+    metadata: Any = None
+    """Additional data that can be accessed programmatically by the application but is not sent to the LLM."""
+
     timestamp: datetime = field(default_factory=_now_utc)
     """The timestamp, when the tool returned."""
 
@@ -414,10 +443,15 @@ class ToolReturnPart:
         else:
             return {'return_value': tool_return_ta.dump_python(self.content, mode='json')}
 
-    def otel_event(self, _settings: InstrumentationSettings) -> Event:
+    def otel_event(self, settings: InstrumentationSettings) -> Event:
         return Event(
             'gen_ai.tool.message',
-            body={'content': self.content, 'role': 'tool', 'id': self.tool_call_id, 'name': self.tool_name},
+            body={
+                **({'content': self.content} if settings.include_content else {}),
+                'role': 'tool',
+                'id': self.tool_call_id,
+                'name': self.tool_name,
+            },
         )
 
     __repr__ = _utils.dataclasses_no_defaults_repr
@@ -473,14 +507,14 @@ class RetryPromptPart:
             description = f'{len(self.content)} validation errors: {json_errors.decode()}'
         return f'{description}\n\nFix the errors and try again.'
 
-    def otel_event(self, _settings: InstrumentationSettings) -> Event:
+    def otel_event(self, settings: InstrumentationSettings) -> Event:
         if self.tool_name is None:
             return Event('gen_ai.user.message', body={'content': self.model_response(), 'role': 'user'})
         else:
             return Event(
                 'gen_ai.tool.message',
                 body={
-                    'content': self.model_response(),
+                    **({'content': self.model_response()} if settings.include_content else {}),
                     'role': 'tool',
                     'id': self.tool_call_id,
                     'name': self.tool_name,
@@ -657,7 +691,7 @@ class ModelResponse:
     vendor_id: str | None = None
     """Vendor ID as specified by the model provider. This can be used to track the specific request to the model."""
 
-    def otel_events(self) -> list[Event]:
+    def otel_events(self, settings: InstrumentationSettings) -> list[Event]:
         """Return OpenTelemetry events for the response."""
         result: list[Event] = []
 
@@ -683,7 +717,8 @@ class ModelResponse:
             elif isinstance(part, TextPart):
                 if body.get('content'):
                     body = new_event_body()
-                body['content'] = part.content
+                if settings.include_content:
+                    body['content'] = part.content
 
         return result
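
Together with the _agent_graph.py changes above, a `ToolReturn` is unpacked into a `ToolReturnPart` (carrying `return_value` and the new `metadata` field) plus an optional `UserPromptPart` built from `content`. A sketch of reading the metadata back out of a finished run (`result` here stands for a hypothetical `agent.run_sync(...)` result):

    from pydantic_ai.messages import ModelRequest, ToolReturnPart

    for message in result.all_messages():
        if isinstance(message, ModelRequest):
            for part in message.parts:
                if isinstance(part, ToolReturnPart) and part.metadata is not None:
                    print(part.tool_name, part.metadata)  # application data, never sent to the LLM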
 

pydantic_ai/models/anthropic.py

@@ -342,15 +342,13 @@ class AnthropicModel(Model):
                     if response_part.content:  # Only add non-empty text
                         assistant_content_params.append(BetaTextBlockParam(text=response_part.content, type='text'))
                 elif isinstance(response_part, ThinkingPart):
-                    # NOTE: We don't send ThinkingPart to the providers yet. If you are unsatisfied with this,
-                    # please open an issue. The below code is the code to send thinking to the provider.
-                    # assert response_part.signature is not None, 'Thinking part must have a signature'
-                    # assistant_content_params.append(
-                    #     BetaThinkingBlockParam(
-                    #         thinking=response_part.content, signature=response_part.signature, type='thinking'
-                    #     )
-                    # )
-                    pass
+                    # NOTE: We only send thinking part back for Anthropic, otherwise they raise an error.
+                    if response_part.signature is not None:  # pragma: no branch
+                        assistant_content_params.append(
+                            BetaThinkingBlockParam(
+                                thinking=response_part.content, signature=response_part.signature, type='thinking'
+                            )
+                        )
                 else:
                     tool_use_block_param = BetaToolUseBlockParam(
                         id=_guard_tool_call_id(t=response_part),
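
This matters with extended thinking enabled, since Anthropic rejects a replayed history whose signed thinking blocks were stripped. A sketch of a setup that produces such blocks (model name and token budget are illustrative):

    from pydantic_ai import Agent
    from pydantic_ai.models.anthropic import AnthropicModelSettings

    agent = Agent(
        'anthropic:claude-sonnet-4-0',
        model_settings=AnthropicModelSettings(
            anthropic_thinking={'type': 'enabled', 'budget_tokens': 1024},
        ),
    )
    # Multi-turn runs now replay signed thinking blocks; unsigned ones are still skipped.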

pydantic_ai/models/instrumented.py

@@ -92,6 +92,7 @@ class InstrumentationSettings:
         meter_provider: MeterProvider | None = None,
         event_logger_provider: EventLoggerProvider | None = None,
         include_binary_content: bool = True,
+        include_content: bool = True,
     ):
         """Create instrumentation options.
 
@@ -109,6 +110,8 @@ class InstrumentationSettings:
                 Calling `logfire.configure()` sets the global event logger provider, so most users don't need this.
                 This is only used if `event_mode='logs'`.
             include_binary_content: Whether to include binary content in the instrumentation events.
+            include_content: Whether to include prompts, completions, and tool call arguments and responses
+                in the instrumentation events.
         """
         from pydantic_ai import __version__
 
@@ -121,6 +124,7 @@ class InstrumentationSettings:
         self.event_logger = event_logger_provider.get_event_logger(scope_name, __version__)
         self.event_mode = event_mode
         self.include_binary_content = include_binary_content
+        self.include_content = include_content
 
         # As specified in the OpenTelemetry GenAI metrics spec:
         # https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-metrics/#metric-gen_aiclienttokenusage
@@ -161,7 +165,7 @@ class InstrumentationSettings:
                 if hasattr(part, 'otel_event'):
                     message_events.append(part.otel_event(self))
             elif isinstance(message, ModelResponse):  # pragma: no branch
-                message_events = message.otel_events()
+                message_events = message.otel_events(self)
             for event in message_events:
                 event.attributes = {
                     'gen_ai.message.index': message_index,
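
`include_content` is the user-facing switch behind all of the content-redaction changes in this release; a minimal sketch:

    from pydantic_ai import Agent
    from pydantic_ai.models.instrumented import InstrumentationSettings

    settings = InstrumentationSettings(include_content=False)
    agent = Agent('test', instrument=settings)
    # Spans and events keep their structure (roles, tool names, call ids) but omit
    # prompts, completions, tool arguments, and tool results.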

pydantic_ai/models/openai.py

@@ -644,13 +644,16 @@ class OpenAIResponsesModel(Model):
         """Process a non-streamed response, and prepare a message to return."""
         timestamp = number_to_datetime(response.created_at)
         items: list[ModelResponsePart] = []
-        items.append(TextPart(response.output_text))
         for item in response.output:
             if item.type == 'reasoning':
                 for summary in item.summary:
                     # NOTE: We use the same id for all summaries because we can merge them on the round trip.
                     # The providers don't force the signature to be unique.
                     items.append(ThinkingPart(content=summary.text, id=item.id))
+            elif item.type == 'message':
+                for content in item.content:
+                    if content.type == 'output_text':  # pragma: no branch
+                        items.append(TextPart(content.text))
             elif item.type == 'function_call':
                 items.append(ToolCallPart(item.name, item.arguments, tool_call_id=item.call_id))
         return ModelResponse(
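
The practical effect is that text is now emitted in response order relative to thinking and tool-call parts, instead of one up-front TextPart built from `response.output_text`. An illustrative sketch with stand-in objects (not the real SDK types):

    from types import SimpleNamespace as NS

    output = [
        NS(type='reasoning', id='rs_1', summary=[NS(text='considered the question')]),
        NS(type='message', content=[NS(type='output_text', text='Hello!')]),
        NS(type='function_call', name='get_time', arguments='{}', call_id='call_1'),
    ]
    # 0.3.5 walks `output` and yields ThinkingPart, TextPart('Hello!'), ToolCallPart
    # in that order; 0.3.3 prepended TextPart(response.output_text) regardless.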

pydantic_ai/output.py

@@ -154,6 +154,8 @@ class NativeOutput(Generic[OutputDataT]):
    """The name of the structured output that will be passed to the model. If not specified and only one output is provided, the name of the output type or function will be used."""
    description: str | None
    """The description of the structured output that will be passed to the model. If not specified and only one output is provided, the docstring of the output type or function will be used."""
+    strict: bool | None
+    """Whether to use strict mode for the output, if the model supports it."""
 
     def __init__(
         self,
@@ -161,10 +163,12 @@ class NativeOutput(Generic[OutputDataT]):
         *,
         name: str | None = None,
         description: str | None = None,
+        strict: bool | None = None,
     ):
         self.outputs = outputs
         self.name = name
         self.description = description
+        self.strict = strict
 
 
 @dataclass(init=False)
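
With the matching _output.py change above forwarding `strict` into the schema build, strict mode can now be requested for native structured output; a sketch (model name illustrative):

    from pydantic import BaseModel

    from pydantic_ai import Agent
    from pydantic_ai.output import NativeOutput

    class CityLocation(BaseModel):
        city: str
        country: str

    agent = Agent('openai:gpt-4o', output_type=NativeOutput(CityLocation, strict=True))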

pydantic_ai/providers/google.py

@@ -104,7 +104,12 @@ class GoogleProvider(Provider[genai.Client]):
             self._client = genai.Client(
                 vertexai=vertexai,
                 project=project or os.environ.get('GOOGLE_CLOUD_PROJECT'),
-                location=location or os.environ.get('GOOGLE_CLOUD_LOCATION'),
+                # From https://github.com/pydantic/pydantic-ai/pull/2031/files#r2169682149:
+                # Currently `us-central1` supports the most models by far of any region including `global`, but not
+                # all of them. `us-central1` has all google models but is missing some Anthropic partner models,
+                # which use `us-east5` instead. `global` has fewer models but higher availability.
+                # For more details, check: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations#available-regions
+                location=location or os.environ.get('GOOGLE_CLOUD_LOCATION') or 'us-central1',
                 credentials=credentials,
                 http_options={'headers': {'User-Agent': get_user_agent()}},
             )
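
So on Vertex AI the client now falls back to `us-central1` rather than leaving the location unset; an explicit argument or `GOOGLE_CLOUD_LOCATION` still takes precedence. A sketch (project id is a placeholder):

    from pydantic_ai.providers.google import GoogleProvider

    GoogleProvider(vertexai=True, project='my-project')  # location defaults to 'us-central1'
    GoogleProvider(vertexai=True, project='my-project', location='europe-west4')  # explicit override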

pydantic_ai/tools.py

@@ -327,6 +327,7 @@ class Tool(Generic[AgentDepsT]):
         message: _messages.ToolCallPart,
         run_context: RunContext[AgentDepsT],
         tracer: Tracer,
+        include_content: bool = False,
     ) -> _messages.ToolReturnPart | _messages.RetryPromptPart:
         """Run the tool function asynchronously.
 
@@ -338,14 +339,14 @@ class Tool(Generic[AgentDepsT]):
             'gen_ai.tool.name': self.name,
             # NOTE: this means `gen_ai.tool.call.id` will be included even if it was generated by pydantic-ai
             'gen_ai.tool.call.id': message.tool_call_id,
-            'tool_arguments': message.args_as_json_str(),
+            **({'tool_arguments': message.args_as_json_str()} if include_content else {}),
             'logfire.msg': f'running tool: {self.name}',
             # add the JSON schema so these attributes are formatted nicely in Logfire
             'logfire.json_schema': json.dumps(
                 {
                     'type': 'object',
                     'properties': {
-                        'tool_arguments': {'type': 'object'},
+                        **({'tool_arguments': {'type': 'object'}} if include_content else {}),
                         'gen_ai.tool.name': {},
                         'gen_ai.tool.call.id': {},
                     },
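
With `include_content` off, the tool-run span keeps its identity attributes but drops the arguments; roughly, the attributes reduce to something like:

    {
        'gen_ai.tool.name': 'get_time',
        'gen_ai.tool.call.id': 'call_1',
        'logfire.msg': 'running tool: get_time',
        'logfire.json_schema': '...',  # schema without the `tool_arguments` property
    }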