pydantic-ai-slim 0.4.1.tar.gz → 0.4.3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (85)
  1. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/PKG-INFO +7 -5
  2. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/__init__.py +2 -1
  3. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/_a2a.py +3 -4
  4. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/_agent_graph.py +5 -2
  5. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/_output.py +130 -20
  6. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/_utils.py +6 -1
  7. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/agent.py +13 -10
  8. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/common_tools/duckduckgo.py +5 -2
  9. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/exceptions.py +2 -2
  10. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/messages.py +6 -4
  11. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/models/__init__.py +34 -1
  12. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/models/anthropic.py +5 -2
  13. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/models/bedrock.py +5 -2
  14. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/models/cohere.py +5 -2
  15. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/models/fallback.py +1 -0
  16. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/models/function.py +13 -2
  17. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/models/gemini.py +13 -10
  18. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/models/google.py +5 -2
  19. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/models/groq.py +5 -2
  20. pydantic_ai_slim-0.4.3/pydantic_ai/models/huggingface.py +463 -0
  21. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/models/instrumented.py +12 -12
  22. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/models/mistral.py +6 -3
  23. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/models/openai.py +16 -4
  24. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/models/test.py +22 -1
  25. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/models/wrapper.py +6 -0
  26. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/output.py +65 -1
  27. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/providers/__init__.py +4 -0
  28. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/providers/google.py +2 -2
  29. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/providers/google_vertex.py +10 -5
  30. pydantic_ai_slim-0.4.3/pydantic_ai/providers/huggingface.py +88 -0
  31. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/result.py +16 -5
  32. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pyproject.toml +4 -2
  33. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/.gitignore +0 -0
  34. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/LICENSE +0 -0
  35. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/README.md +0 -0
  36. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/__main__.py +0 -0
  37. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/_cli.py +0 -0
  38. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/_function_schema.py +0 -0
  39. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/_griffe.py +0 -0
  40. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/_mcp.py +0 -0
  41. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/_parts_manager.py +0 -0
  42. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/_run_context.py +0 -0
  43. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/_system_prompt.py +0 -0
  44. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/_thinking_part.py +0 -0
  45. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/common_tools/__init__.py +0 -0
  46. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/common_tools/tavily.py +0 -0
  47. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/direct.py +0 -0
  48. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/ext/__init__.py +0 -0
  49. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/ext/aci.py +0 -0
  50. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/ext/langchain.py +0 -0
  51. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/format_as_xml.py +0 -0
  52. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/format_prompt.py +0 -0
  53. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/mcp.py +0 -0
  54. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/models/mcp_sampling.py +0 -0
  55. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/profiles/__init__.py +0 -0
  56. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/profiles/_json_schema.py +0 -0
  57. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/profiles/amazon.py +0 -0
  58. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/profiles/anthropic.py +0 -0
  59. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/profiles/cohere.py +0 -0
  60. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/profiles/deepseek.py +0 -0
  61. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/profiles/google.py +0 -0
  62. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/profiles/grok.py +0 -0
  63. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/profiles/meta.py +0 -0
  64. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/profiles/mistral.py +0 -0
  65. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/profiles/openai.py +0 -0
  66. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/profiles/qwen.py +0 -0
  67. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/providers/anthropic.py +0 -0
  68. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/providers/azure.py +0 -0
  69. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/providers/bedrock.py +0 -0
  70. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/providers/cohere.py +0 -0
  71. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/providers/deepseek.py +0 -0
  72. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/providers/fireworks.py +0 -0
  73. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/providers/github.py +0 -0
  74. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/providers/google_gla.py +0 -0
  75. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/providers/grok.py +0 -0
  76. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/providers/groq.py +0 -0
  77. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/providers/heroku.py +0 -0
  78. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/providers/mistral.py +0 -0
  79. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/providers/openai.py +0 -0
  80. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/providers/openrouter.py +0 -0
  81. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/providers/together.py +0 -0
  82. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/py.typed +0 -0
  83. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/settings.py +0 -0
  84. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/tools.py +0 -0
  85. {pydantic_ai_slim-0.4.1 → pydantic_ai_slim-0.4.3}/pydantic_ai/usage.py +0 -0
--- pydantic_ai_slim-0.4.1/PKG-INFO
+++ pydantic_ai_slim-0.4.3/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.4.1
+Version: 0.4.3
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
 License-Expression: MIT
@@ -30,11 +30,11 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.4.1
+Requires-Dist: pydantic-graph==0.4.3
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
-Requires-Dist: fasta2a==0.4.1; extra == 'a2a'
+Requires-Dist: fasta2a>=0.4.1; extra == 'a2a'
 Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.52.0; extra == 'anthropic'
 Provides-Extra: bedrock
@@ -46,13 +46,15 @@ Requires-Dist: rich>=13; extra == 'cli'
 Provides-Extra: cohere
 Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == 'cohere'
 Provides-Extra: duckduckgo
-Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
+Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.4.1; extra == 'evals'
+Requires-Dist: pydantic-evals==0.4.3; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.24.0; extra == 'google'
 Provides-Extra: groq
 Requires-Dist: groq>=0.19.0; extra == 'groq'
+Provides-Extra: huggingface
+Requires-Dist: huggingface-hub[inference]>=0.33.2; extra == 'huggingface'
 Provides-Extra: logfire
 Requires-Dist: logfire>=3.11.0; extra == 'logfire'
 Provides-Extra: mcp
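The new `huggingface` extra backs the two new files listed above (`pydantic_ai/models/huggingface.py`, `pydantic_ai/providers/huggingface.py`). A minimal usage sketch, assuming the constructors follow the same pattern as the other model/provider classes and that a Hugging Face token is available as `HF_TOKEN`; the model name is illustrative:

    # Sketch only: pip install "pydantic-ai-slim[huggingface]"
    from pydantic_ai import Agent
    from pydantic_ai.models.huggingface import HuggingFaceModel
    from pydantic_ai.providers.huggingface import HuggingFaceProvider

    # HuggingFaceProvider is assumed to pick up HF_TOKEN from the environment.
    model = HuggingFaceModel('Qwen/Qwen2.5-72B-Instruct', provider=HuggingFaceProvider())
    agent = Agent(model)
    print(agent.run_sync('What is the capital of France?').output)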
--- pydantic_ai_slim-0.4.1/pydantic_ai/__init__.py
+++ pydantic_ai_slim-0.4.3/pydantic_ai/__init__.py
@@ -12,7 +12,7 @@ from .exceptions import (
 )
 from .format_prompt import format_as_xml
 from .messages import AudioUrl, BinaryContent, DocumentUrl, ImageUrl, VideoUrl
-from .output import NativeOutput, PromptedOutput, TextOutput, ToolOutput
+from .output import NativeOutput, PromptedOutput, StructuredDict, TextOutput, ToolOutput
 from .tools import RunContext, Tool

 __all__ = (
@@ -46,6 +46,7 @@ __all__ = (
     'NativeOutput',
     'PromptedOutput',
     'TextOutput',
+    'StructuredDict',
     # format_prompt
     'format_as_xml',
 )
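`StructuredDict` is the headline export here: it wraps a raw JSON schema as a `dict[str, Any]` output type, for cases where defining a Pydantic model is impractical (it also carries the `__is_model_like__` marker that `_utils.is_model_like` starts checking below). A sketch, assuming the `StructuredDict(json_schema, name=..., description=...)` signature implied by the `pydantic_ai/output.py` changes; the model name and schema are illustrative:

    from pydantic_ai import Agent, StructuredDict

    PersonDict = StructuredDict(
        {
            'type': 'object',
            'properties': {'name': {'type': 'string'}, 'age': {'type': 'integer'}},
            'required': ['name', 'age'],
        },
        name='Person',
        description='A person',
    )

    agent = Agent('openai:gpt-4o', output_type=PersonDict)
    result = agent.run_sync('Create a person')
    # result.output is a plain dict validated against the schema, e.g. {'name': 'Jane', 'age': 30}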
--- pydantic_ai_slim-0.4.1/pydantic_ai/_a2a.py
+++ pydantic_ai_slim-0.4.3/pydantic_ai/_a2a.py
@@ -33,10 +33,6 @@ from .agent import Agent, AgentDepsT, OutputDataT
 WorkerOutputT = TypeVar('WorkerOutputT')

 try:
-    from starlette.middleware import Middleware
-    from starlette.routing import Route
-    from starlette.types import ExceptionHandler, Lifespan
-
     from fasta2a.applications import FastA2A
     from fasta2a.broker import Broker, InMemoryBroker
     from fasta2a.schema import (
@@ -52,6 +48,9 @@ try:
     )
     from fasta2a.storage import InMemoryStorage, Storage
     from fasta2a.worker import Worker
+    from starlette.middleware import Middleware
+    from starlette.routing import Route
+    from starlette.types import ExceptionHandler, Lifespan
 except ImportError as _import_error:
     raise ImportError(
         'Please install the `fasta2a` package to use `Agent.to_a2a()` method, '
--- pydantic_ai_slim-0.4.1/pydantic_ai/_agent_graph.py
+++ pydantic_ai_slim-0.4.3/pydantic_ai/_agent_graph.py
@@ -341,6 +341,7 @@ class ModelRequestNode(AgentNode[DepsT, NodeRunEndT]):
                     ctx.deps.output_schema,
                     ctx.deps.output_validators,
                     build_run_context(ctx),
+                    _output.build_trace_context(ctx),
                     ctx.deps.usage_limits,
                 )
                 yield agent_stream
@@ -529,7 +530,8 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
        if isinstance(output_schema, _output.ToolOutputSchema):
            for call, output_tool in output_schema.find_tool(tool_calls):
                try:
-                    result_data = await output_tool.process(call, run_context)
+                    trace_context = _output.build_trace_context(ctx)
+                    result_data = await output_tool.process(call, run_context, trace_context)
                    result_data = await _validate_output(result_data, ctx, call)
                except _output.ToolRetryError as e:
                    # TODO: Should only increment retry stuff once per node execution, not for each tool call
@@ -586,7 +588,8 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
            try:
                if isinstance(output_schema, _output.TextOutputSchema):
                    run_context = build_run_context(ctx)
-                    result_data = await output_schema.process(text, run_context)
+                    trace_context = _output.build_trace_context(ctx)
+                    result_data = await output_schema.process(text, run_context, trace_context)
                else:
                    m = _messages.RetryPromptPart(
                        content='Plain text responses are not permitted, please include your response in a tool call',
--- pydantic_ai_slim-0.4.1/pydantic_ai/_output.py
+++ pydantic_ai_slim-0.4.3/pydantic_ai/_output.py
@@ -1,5 +1,6 @@
 from __future__ import annotations as _annotations

+import dataclasses
 import inspect
 import json
 from abc import ABC, abstractmethod
@@ -7,10 +8,13 @@ from collections.abc import Awaitable, Iterable, Iterator, Sequence
 from dataclasses import dataclass, field
 from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, Union, cast, overload

+from opentelemetry.trace import Tracer
 from pydantic import TypeAdapter, ValidationError
 from pydantic_core import SchemaValidator
 from typing_extensions import TypedDict, TypeVar, assert_never

+from pydantic_graph.nodes import GraphRunContext
+
 from . import _function_schema, _utils, messages as _messages
 from ._run_context import AgentDepsT, RunContext
 from .exceptions import ModelRetry, UserError
@@ -29,6 +33,8 @@ from .output import (
 )
 from .tools import GenerateToolJsonSchema, ObjectJsonSchema, ToolDefinition

 if TYPE_CHECKING:
+    from pydantic_ai._agent_graph import DepsT, GraphAgentDeps, GraphAgentState
+
     from .profiles import ModelProfile

 T = TypeVar('T')
@@ -66,6 +72,71 @@ DEFAULT_OUTPUT_TOOL_NAME = 'final_result'
 DEFAULT_OUTPUT_TOOL_DESCRIPTION = 'The final response which ends this conversation'


+@dataclass(frozen=True)
+class TraceContext:
+    """A context for tracing output processing."""
+
+    tracer: Tracer
+    include_content: bool
+    call: _messages.ToolCallPart | None = None
+
+    def with_call(self, call: _messages.ToolCallPart):
+        return dataclasses.replace(self, call=call)
+
+    async def execute_function_with_span(
+        self,
+        function_schema: _function_schema.FunctionSchema,
+        run_context: RunContext[AgentDepsT],
+        args: dict[str, Any] | Any,
+        call: _messages.ToolCallPart,
+        include_tool_call_id: bool = True,
+    ) -> Any:
+        """Execute a function call within a traced span, automatically recording the response."""
+        # Set up span attributes
+        attributes = {
+            'gen_ai.tool.name': call.tool_name,
+            'logfire.msg': f'running output function: {call.tool_name}',
+        }
+        if include_tool_call_id:
+            attributes['gen_ai.tool.call.id'] = call.tool_call_id
+        if self.include_content:
+            attributes['tool_arguments'] = call.args_as_json_str()
+            attributes['logfire.json_schema'] = json.dumps(
+                {
+                    'type': 'object',
+                    'properties': {
+                        'tool_arguments': {'type': 'object'},
+                        'tool_response': {'type': 'object'},
+                    },
+                }
+            )
+
+        # Execute function within span
+        with self.tracer.start_as_current_span('running output function', attributes=attributes) as span:
+            output = await function_schema.call(args, run_context)
+
+            # Record response if content inclusion is enabled
+            if self.include_content and span.is_recording():
+                from .models.instrumented import InstrumentedModel
+
+                span.set_attribute(
+                    'tool_response',
+                    output if isinstance(output, str) else json.dumps(InstrumentedModel.serialize_any(output)),
+                )
+
+            return output
+
+
+def build_trace_context(ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, Any]]) -> TraceContext:
+    """Build a `TraceContext` from the current agent graph run context."""
+    return TraceContext(
+        tracer=ctx.deps.tracer,
+        include_content=(
+            ctx.deps.instrumentation_settings is not None and ctx.deps.instrumentation_settings.include_content
+        ),
+    )
+
+
 class ToolRetryError(Exception):
     """Exception used to signal a `ToolRetry` message should be returned to the LLM."""

@@ -96,6 +167,7 @@ class OutputValidator(Generic[AgentDepsT, OutputDataT_inv]):
             result: The result data after Pydantic validation the message content.
             tool_call: The original tool call message, `None` if there was no tool call.
             run_context: The current run context.
+            trace_context: The trace context to use for tracing the output processing.

         Returns:
             Result of either the validated result data (ok) or a retry message (Err).
@@ -264,10 +336,16 @@ class OutputSchema(BaseOutputSchema[OutputDataT], ABC):

                output = output.output

+            description = description or default_description
+            if strict is None:
+                strict = default_strict
+
+            processor = ObjectOutputProcessor(output=output, description=description, strict=strict)
+
            if name is None:
                name = default_name
                if multiple:
-                    name += f'_{output.__name__}'
+                    name += f'_{processor.object_def.name}'

            i = 1
            original_name = name
@@ -275,11 +353,6 @@ class OutputSchema(BaseOutputSchema[OutputDataT], ABC):
                i += 1
                name = f'{original_name}_{i}'

-            description = description or default_description
-            if strict is None:
-                strict = default_strict
-
-            processor = ObjectOutputProcessor(output=output, description=description, strict=strict)
            tools[name] = OutputTool(name=name, processor=processor, multiple=multiple)

        return tools
@@ -348,6 +421,7 @@ class TextOutputSchema(OutputSchema[OutputDataT], ABC):
        self,
        text: str,
        run_context: RunContext[AgentDepsT],
+        trace_context: TraceContext,
        allow_partial: bool = False,
        wrap_validation_errors: bool = True,
    ) -> OutputDataT:
@@ -370,6 +444,7 @@ class PlainTextOutputSchema(TextOutputSchema[OutputDataT]):
        self,
        text: str,
        run_context: RunContext[AgentDepsT],
+        trace_context: TraceContext,
        allow_partial: bool = False,
        wrap_validation_errors: bool = True,
    ) -> OutputDataT:
@@ -378,6 +453,7 @@ class PlainTextOutputSchema(TextOutputSchema[OutputDataT]):
        Args:
            text: The output text to validate.
            run_context: The current run context.
+            trace_context: The trace context to use for tracing the output processing.
            allow_partial: If true, allow partial validation.
            wrap_validation_errors: If true, wrap the validation errors in a retry message.

@@ -388,7 +464,7 @@ class PlainTextOutputSchema(TextOutputSchema[OutputDataT]):
            return cast(OutputDataT, text)

        return await self.processor.process(
-            text, run_context, allow_partial=allow_partial, wrap_validation_errors=wrap_validation_errors
+            text, run_context, trace_context, allow_partial=allow_partial, wrap_validation_errors=wrap_validation_errors
        )


@@ -416,6 +492,7 @@ class NativeOutputSchema(StructuredTextOutputSchema[OutputDataT]):
        self,
        text: str,
        run_context: RunContext[AgentDepsT],
+        trace_context: TraceContext,
        allow_partial: bool = False,
        wrap_validation_errors: bool = True,
    ) -> OutputDataT:
@@ -424,6 +501,7 @@ class NativeOutputSchema(StructuredTextOutputSchema[OutputDataT]):
        Args:
            text: The output text to validate.
            run_context: The current run context.
+            trace_context: The trace context to use for tracing the output processing.
            allow_partial: If true, allow partial validation.
            wrap_validation_errors: If true, wrap the validation errors in a retry message.

@@ -431,7 +509,7 @@ class NativeOutputSchema(StructuredTextOutputSchema[OutputDataT]):
            Either the validated output data (left) or a retry message (right).
        """
        return await self.processor.process(
-            text, run_context, allow_partial=allow_partial, wrap_validation_errors=wrap_validation_errors
+            text, run_context, trace_context, allow_partial=allow_partial, wrap_validation_errors=wrap_validation_errors
        )


@@ -467,6 +545,7 @@ class PromptedOutputSchema(StructuredTextOutputSchema[OutputDataT]):
        self,
        text: str,
        run_context: RunContext[AgentDepsT],
+        trace_context: TraceContext,
        allow_partial: bool = False,
        wrap_validation_errors: bool = True,
    ) -> OutputDataT:
@@ -475,6 +554,7 @@ class PromptedOutputSchema(StructuredTextOutputSchema[OutputDataT]):
        Args:
            text: The output text to validate.
            run_context: The current run context.
+            trace_context: The trace context to use for tracing the output processing.
            allow_partial: If true, allow partial validation.
            wrap_validation_errors: If true, wrap the validation errors in a retry message.

@@ -484,7 +564,7 @@ class PromptedOutputSchema(StructuredTextOutputSchema[OutputDataT]):
        text = _utils.strip_markdown_fences(text)

        return await self.processor.process(
-            text, run_context, allow_partial=allow_partial, wrap_validation_errors=wrap_validation_errors
+            text, run_context, trace_context, allow_partial=allow_partial, wrap_validation_errors=wrap_validation_errors
        )


@@ -567,6 +647,7 @@ class BaseOutputProcessor(ABC, Generic[OutputDataT]):
        self,
        data: str,
        run_context: RunContext[AgentDepsT],
+        trace_context: TraceContext,
        allow_partial: bool = False,
        wrap_validation_errors: bool = True,
    ) -> OutputDataT:
@@ -616,6 +697,9 @@ class ObjectOutputProcessor(BaseOutputProcessor[OutputDataT]):
                # including `response_data_typed_dict` as a title here doesn't add anything and could confuse the LLM
                json_schema.pop('title')

+        if name is None and (json_schema_title := json_schema.get('title', None)):
+            name = json_schema_title
+
        if json_schema_description := json_schema.pop('description', None):
            if description is None:
                description = json_schema_description
@@ -633,6 +717,7 @@ class ObjectOutputProcessor(BaseOutputProcessor[OutputDataT]):
        self,
        data: str | dict[str, Any] | None,
        run_context: RunContext[AgentDepsT],
+        trace_context: TraceContext,
        allow_partial: bool = False,
        wrap_validation_errors: bool = True,
    ) -> OutputDataT:
@@ -641,6 +726,7 @@ class ObjectOutputProcessor(BaseOutputProcessor[OutputDataT]):
        Args:
            data: The output data to validate.
            run_context: The current run context.
+            trace_context: The trace context to use for tracing the output processing.
            allow_partial: If true, allow partial validation.
            wrap_validation_errors: If true, wrap the validation errors in a retry message.

@@ -660,14 +746,24 @@ class ObjectOutputProcessor(BaseOutputProcessor[OutputDataT]):
                )
                raise ToolRetryError(m) from e
            else:
-                raise  # pragma: lax no cover
+                raise

        if k := self.outer_typed_dict_key:
            output = output[k]

        if self._function_schema:
+            # Wraps the output function call in an OpenTelemetry span.
+            if trace_context.call:
+                call = trace_context.call
+                include_tool_call_id = True
+            else:
+                function_name = getattr(self._function_schema.function, '__name__', 'output_function')
+                call = _messages.ToolCallPart(tool_name=function_name, args=data)
+                include_tool_call_id = False
            try:
-                output = await self._function_schema.call(output, run_context)
+                output = await trace_context.execute_function_with_span(
+                    self._function_schema, run_context, output, call, include_tool_call_id
+                )
            except ModelRetry as r:
                if wrap_validation_errors:
                    m = _messages.RetryPromptPart(
@@ -675,7 +771,7 @@ class ObjectOutputProcessor(BaseOutputProcessor[OutputDataT]):
                    )
                    raise ToolRetryError(m) from r
                else:
-                    raise  # pragma: lax no cover
+                    raise

        return output

@@ -780,11 +876,12 @@ class UnionOutputProcessor(BaseOutputProcessor[OutputDataT]):
        self,
        data: str | dict[str, Any] | None,
        run_context: RunContext[AgentDepsT],
+        trace_context: TraceContext,
        allow_partial: bool = False,
        wrap_validation_errors: bool = True,
    ) -> OutputDataT:
        union_object = await self._union_processor.process(
-            data, run_context, allow_partial=allow_partial, wrap_validation_errors=wrap_validation_errors
+            data, run_context, trace_context, allow_partial=allow_partial, wrap_validation_errors=wrap_validation_errors
        )

        result = union_object.result
@@ -800,7 +897,7 @@ class UnionOutputProcessor(BaseOutputProcessor[OutputDataT]):
            raise

        return await processor.process(
-            data, run_context, allow_partial=allow_partial, wrap_validation_errors=wrap_validation_errors
+            data, run_context, trace_context, allow_partial=allow_partial, wrap_validation_errors=wrap_validation_errors
        )


@@ -831,13 +928,20 @@ class PlainTextOutputProcessor(BaseOutputProcessor[OutputDataT]):
        self,
        data: str,
        run_context: RunContext[AgentDepsT],
+        trace_context: TraceContext,
        allow_partial: bool = False,
        wrap_validation_errors: bool = True,
    ) -> OutputDataT:
        args = {self._str_argument_name: data}
-
+        # Wraps the output function call in an OpenTelemetry span.
+        # Note: PlainTextOutputProcessor is used for text responses (not tool calls),
+        # so we don't have tool call attributes like gen_ai.tool.name or gen_ai.tool.call.id
+        function_name = getattr(self._function_schema.function, '__name__', 'text_output_function')
+        call = _messages.ToolCallPart(tool_name=function_name, args=args)
        try:
-            output = await self._function_schema.call(args, run_context)
+            output = await trace_context.execute_function_with_span(
+                self._function_schema, run_context, args, call, include_tool_call_id=False
+            )
        except ModelRetry as r:
            if wrap_validation_errors:
                m = _messages.RetryPromptPart(
@@ -845,7 +949,7 @@ class PlainTextOutputProcessor(BaseOutputProcessor[OutputDataT]):
                )
                raise ToolRetryError(m) from r
            else:
-                raise  # pragma: lax no cover
+                raise  # pragma: no cover

        return cast(OutputDataT, output)

@@ -877,6 +981,7 @@ class OutputTool(Generic[OutputDataT]):
        self,
        tool_call: _messages.ToolCallPart,
        run_context: RunContext[AgentDepsT],
+        trace_context: TraceContext,
        allow_partial: bool = False,
        wrap_validation_errors: bool = True,
    ) -> OutputDataT:
@@ -885,6 +990,7 @@ class OutputTool(Generic[OutputDataT]):
        Args:
            tool_call: The tool call from the LLM to validate.
            run_context: The current run context.
+            trace_context: The trace context to use for tracing the output processing.
            allow_partial: If true, allow partial validation.
            wrap_validation_errors: If true, wrap the validation errors in a retry message.

@@ -893,7 +999,11 @@ class OutputTool(Generic[OutputDataT]):
        """
        try:
            output = await self.processor.process(
-                tool_call.args, run_context, allow_partial=allow_partial, wrap_validation_errors=False
+                tool_call.args,
+                run_context,
+                trace_context.with_call(tool_call),
+                allow_partial=allow_partial,
+                wrap_validation_errors=False,
            )
        except ValidationError as e:
            if wrap_validation_errors:
@@ -904,7 +1014,7 @@ class OutputTool(Generic[OutputDataT]):
                )
                raise ToolRetryError(m) from e
            else:
-                raise  # pragma: lax no cover
+                raise  # pragma: no cover
        except ModelRetry as r:
            if wrap_validation_errors:
                m = _messages.RetryPromptPart(
@@ -914,7 +1024,7 @@ class OutputTool(Generic[OutputDataT]):
                )
                raise ToolRetryError(m) from r
            else:
-                raise  # pragma: lax no cover
+                raise  # pragma: no cover
        else:
            return output

--- pydantic_ai_slim-0.4.1/pydantic_ai/_utils.py
+++ pydantic_ai_slim-0.4.3/pydantic_ai/_utils.py
@@ -60,7 +60,12 @@ def is_model_like(type_: Any) -> bool:
     return (
         isinstance(type_, type)
         and not isinstance(type_, GenericAlias)
-        and (issubclass(type_, BaseModel) or is_dataclass(type_) or is_typeddict(type_))  # pyright: ignore[reportUnknownArgumentType]
+        and (
+            issubclass(type_, BaseModel)
+            or is_dataclass(type_)  # pyright: ignore[reportUnknownArgumentType]
+            or is_typeddict(type_)  # pyright: ignore[reportUnknownArgumentType]
+            or getattr(type_, '__is_model_like__', False)  # pyright: ignore[reportUnknownArgumentType]
+        )
     )

--- pydantic_ai_slim-0.4.1/pydantic_ai/agent.py
+++ pydantic_ai_slim-0.4.3/pydantic_ai/agent.py
@@ -57,14 +57,14 @@ ModelRequestNode = _agent_graph.ModelRequestNode
 UserPromptNode = _agent_graph.UserPromptNode

 if TYPE_CHECKING:
-    from starlette.middleware import Middleware
-    from starlette.routing import Route
-    from starlette.types import ExceptionHandler, Lifespan
-
     from fasta2a.applications import FastA2A
     from fasta2a.broker import Broker
     from fasta2a.schema import AgentProvider, Skill
     from fasta2a.storage import Storage
+    from starlette.middleware import Middleware
+    from starlette.routing import Route
+    from starlette.types import ExceptionHandler, Lifespan
+
     from pydantic_ai.mcp import MCPServer

@@ -500,7 +500,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
     @overload
     def iter(
         self,
-        user_prompt: str | Sequence[_messages.UserContent] | None,
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
         *,
         output_type: None = None,
         message_history: list[_messages.ModelMessage] | None = None,
@@ -516,7 +516,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
     @overload
     def iter(
         self,
-        user_prompt: str | Sequence[_messages.UserContent] | None,
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
         *,
         output_type: OutputSpec[RunOutputDataT],
         message_history: list[_messages.ModelMessage] | None = None,
@@ -533,7 +533,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
     @deprecated('`result_type` is deprecated, use `output_type` instead.')
     def iter(
         self,
-        user_prompt: str | Sequence[_messages.UserContent] | None,
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
         *,
         result_type: type[RunOutputDataT],
         message_history: list[_messages.ModelMessage] | None = None,
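All three `iter` overloads above now default `user_prompt` to `None`, matching `run`/`run_sync`, so an iteration can be driven from `message_history` alone. A rough sketch of what this permits; whether a run with no new prompt makes sense depends on your message history:

    import asyncio

    from pydantic_ai import Agent

    agent = Agent('openai:gpt-4o')

    async def main() -> None:
        first = await agent.run('Tell me a joke.')
        # user_prompt can now be omitted entirely when iterating:
        async with agent.iter(message_history=first.all_messages()) as run:
            async for node in run:
                print(type(node).__name__)

    asyncio.run(main())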
@@ -674,12 +674,14 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         # typecast reasonable, even though it is possible to violate it with otherwise-type-checked code.
         output_validators = cast(list[_output.OutputValidator[AgentDepsT, RunOutputDataT]], self._output_validators)

-        model_settings = merge_model_settings(self.model_settings, model_settings)
+        # Merge model settings in order of precedence: run > agent > model
+        merged_settings = merge_model_settings(model_used.settings, self.model_settings)
+        model_settings = merge_model_settings(merged_settings, model_settings)
         usage_limits = usage_limits or _usage.UsageLimits()

         if isinstance(model_used, InstrumentedModel):
-            instrumentation_settings = model_used.settings
-            tracer = model_used.settings.tracer
+            instrumentation_settings = model_used.instrumentation_settings
+            tracer = model_used.instrumentation_settings.tracer
         else:
             instrumentation_settings = None
             tracer = NoOpTracer()
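This hunk makes settings attached to the model itself (the new `Model.settings` attribute from the `models/__init__.py` change, +34 lines) participate in merging: run-level settings override agent-level ones, which override model-level ones. Roughly, assuming the model classes accept a `settings=` argument as implied by `model_used.settings`:

    from pydantic_ai import Agent
    from pydantic_ai.models.openai import OpenAIModel
    from pydantic_ai.settings import ModelSettings

    model = OpenAIModel('gpt-4o', settings=ModelSettings(temperature=1.0, max_tokens=500))
    agent = Agent(model, model_settings=ModelSettings(temperature=0.5))

    # Effective settings: temperature=0.1 (run wins), max_tokens=500 (from the model).
    result = agent.run_sync('Hello', model_settings=ModelSettings(temperature=0.1))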
@@ -1087,6 +1089,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
                 streamed_response,
                 graph_ctx.deps.output_schema,
                 _agent_graph.build_run_context(graph_ctx),
+                _output.build_trace_context(graph_ctx),
                 graph_ctx.deps.output_validators,
                 final_result_details.tool_name,
                 on_complete,
--- pydantic_ai_slim-0.4.1/pydantic_ai/common_tools/duckduckgo.py
+++ pydantic_ai_slim-0.4.3/pydantic_ai/common_tools/duckduckgo.py
@@ -9,10 +9,13 @@ from typing_extensions import TypedDict

 from pydantic_ai.tools import Tool

 try:
-    from duckduckgo_search import DDGS
+    try:
+        from ddgs import DDGS
+    except ImportError:  # Fallback for older versions of ddgs
+        from duckduckgo_search import DDGS
 except ImportError as _import_error:
     raise ImportError(
-        'Please install `duckduckgo-search` to use the DuckDuckGo search tool, '
+        'Please install `ddgs` to use the DuckDuckGo search tool, '
         'you can use the `duckduckgo` optional group — `pip install "pydantic-ai-slim[duckduckgo]"`'
     ) from _import_error

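Only the backing package changes: `duckduckgo-search` was renamed upstream to `ddgs`, and the nested `try` keeps the old import working. The tool's public API is unchanged, e.g.:

    # pip install "pydantic-ai-slim[duckduckgo]"
    from pydantic_ai import Agent
    from pydantic_ai.common_tools.duckduckgo import duckduckgo_search_tool

    agent = Agent('openai:gpt-4o', tools=[duckduckgo_search_tool()])
    result = agent.run_sync('List the top five highest-grossing animated films.')
    print(result.output)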
--- pydantic_ai_slim-0.4.1/pydantic_ai/exceptions.py
+++ pydantic_ai_slim-0.4.3/pydantic_ai/exceptions.py
@@ -4,9 +4,9 @@ import json
 import sys

 if sys.version_info < (3, 11):
-    from exceptiongroup import ExceptionGroup  # pragma: lax no cover
+    from exceptiongroup import ExceptionGroup
 else:
-    ExceptionGroup = ExceptionGroup  # pragma: lax no cover
+    ExceptionGroup = ExceptionGroup

 __all__ = (
     'ModelRetry',
--- pydantic_ai_slim-0.4.1/pydantic_ai/messages.py
+++ pydantic_ai_slim-0.4.3/pydantic_ai/messages.py
@@ -411,9 +411,9 @@ class UserPromptPart:
     """Part type identifier, this is available on all parts as a discriminator."""

     def otel_event(self, settings: InstrumentationSettings) -> Event:
-        content: str | list[dict[str, Any] | str]
+        content: str | list[dict[str, Any] | str] | dict[str, Any]
         if isinstance(self.content, str):
-            content = self.content
+            content = self.content if settings.include_content else {'kind': 'text'}
         else:
             content = []
             for part in self.content:
@@ -433,7 +435,9 @@ class UserPromptPart:
     __repr__ = _utils.dataclasses_no_defaults_repr


-tool_return_ta: pydantic.TypeAdapter[Any] = pydantic.TypeAdapter(Any, config=pydantic.ConfigDict(defer_build=True))
+tool_return_ta: pydantic.TypeAdapter[Any] = pydantic.TypeAdapter(
+    Any, config=pydantic.ConfigDict(defer_build=True, ser_json_bytes='base64', val_json_bytes='base64')
+)


 @dataclass(repr=False)
@@ -743,7 +745,7 @@ class ModelResponse:
                     'type': 'function',  # TODO https://github.com/pydantic/pydantic-ai/issues/888
                     'function': {
                         'name': part.tool_name,
-                        'arguments': part.args,
+                        **({'arguments': part.args} if settings.include_content else {}),
                     },
                 }
             )
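Together with the `UserPromptPart.otel_event` hunk above, this makes `include_content=False` redact both user prompt text (recorded as `{'kind': 'text'}`) and tool-call arguments from OpenTelemetry events. A sketch of opting out of content capture, assuming `instrument=` accepts `InstrumentationSettings` as in current releases:

    from pydantic_ai import Agent
    from pydantic_ai.models.instrumented import InstrumentationSettings

    agent = Agent(
        'openai:gpt-4o',
        instrument=InstrumentationSettings(include_content=False),
    )
    # Telemetry events now omit prompt text and tool-call arguments.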