pydantic-ai-slim 0.4.5.tar.gz → 0.4.7.tar.gz

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.


Files changed (100)
  1. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/.gitignore +0 -1
  2. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/PKG-INFO +7 -7
  3. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/_function_schema.py +13 -4
  4. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/_output.py +41 -25
  5. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/_parts_manager.py +31 -5
  6. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/ag_ui.py +68 -78
  7. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/agent.py +9 -29
  8. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/mcp.py +79 -19
  9. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/messages.py +74 -16
  10. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/models/__init__.py +12 -1
  11. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/models/anthropic.py +11 -3
  12. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/models/bedrock.py +4 -2
  13. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/models/cohere.py +6 -6
  14. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/models/function.py +19 -18
  15. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/models/gemini.py +5 -1
  16. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/models/google.py +9 -2
  17. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/models/groq.py +6 -2
  18. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/models/huggingface.py +6 -2
  19. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/models/mistral.py +15 -3
  20. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/models/openai.py +34 -7
  21. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/models/test.py +6 -2
  22. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/profiles/openai.py +8 -0
  23. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/providers/__init__.py +8 -0
  24. pydantic_ai_slim-0.4.7/pydantic_ai/providers/moonshotai.py +97 -0
  25. pydantic_ai_slim-0.4.7/pydantic_ai/providers/vercel.py +107 -0
  26. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/result.py +115 -151
  27. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pyproject.toml +4 -4
  28. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/LICENSE +0 -0
  29. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/README.md +0 -0
  30. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/__init__.py +0 -0
  31. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/__main__.py +0 -0
  32. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/_a2a.py +0 -0
  33. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/_agent_graph.py +0 -0
  34. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/_cli.py +0 -0
  35. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/_griffe.py +0 -0
  36. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/_mcp.py +0 -0
  37. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/_run_context.py +0 -0
  38. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/_system_prompt.py +0 -0
  39. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/_thinking_part.py +0 -0
  40. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/_tool_manager.py +0 -0
  41. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/_utils.py +0 -0
  42. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/common_tools/__init__.py +0 -0
  43. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  44. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/common_tools/tavily.py +0 -0
  45. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/direct.py +0 -0
  46. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/exceptions.py +0 -0
  47. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/ext/__init__.py +0 -0
  48. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/ext/aci.py +0 -0
  49. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/ext/langchain.py +0 -0
  50. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/format_as_xml.py +0 -0
  51. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/format_prompt.py +0 -0
  52. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/models/fallback.py +0 -0
  53. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/models/instrumented.py +0 -0
  54. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/models/mcp_sampling.py +0 -0
  55. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/models/wrapper.py +0 -0
  56. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/output.py +0 -0
  57. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/profiles/__init__.py +0 -0
  58. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/profiles/_json_schema.py +0 -0
  59. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/profiles/amazon.py +0 -0
  60. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/profiles/anthropic.py +0 -0
  61. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/profiles/cohere.py +0 -0
  62. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/profiles/deepseek.py +0 -0
  63. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/profiles/google.py +0 -0
  64. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/profiles/grok.py +0 -0
  65. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/profiles/meta.py +0 -0
  66. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/profiles/mistral.py +0 -0
  67. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/profiles/moonshotai.py +0 -0
  68. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/profiles/qwen.py +0 -0
  69. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/providers/anthropic.py +0 -0
  70. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/providers/azure.py +0 -0
  71. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/providers/bedrock.py +0 -0
  72. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/providers/cohere.py +0 -0
  73. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/providers/deepseek.py +0 -0
  74. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/providers/fireworks.py +0 -0
  75. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/providers/github.py +0 -0
  76. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/providers/google.py +0 -0
  77. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/providers/google_gla.py +0 -0
  78. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/providers/google_vertex.py +0 -0
  79. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/providers/grok.py +0 -0
  80. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/providers/groq.py +0 -0
  81. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/providers/heroku.py +0 -0
  82. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/providers/huggingface.py +0 -0
  83. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/providers/mistral.py +0 -0
  84. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/providers/openai.py +0 -0
  85. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/providers/openrouter.py +0 -0
  86. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/providers/together.py +0 -0
  87. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/py.typed +0 -0
  88. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/settings.py +0 -0
  89. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/tools.py +0 -0
  90. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/toolsets/__init__.py +0 -0
  91. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/toolsets/abstract.py +0 -0
  92. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/toolsets/combined.py +0 -0
  93. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/toolsets/deferred.py +0 -0
  94. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/toolsets/filtered.py +0 -0
  95. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/toolsets/function.py +0 -0
  96. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/toolsets/prefixed.py +0 -0
  97. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/toolsets/prepared.py +0 -0
  98. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/toolsets/renamed.py +0 -0
  99. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/toolsets/wrapper.py +0 -0
  100. {pydantic_ai_slim-0.4.5 → pydantic_ai_slim-0.4.7}/pydantic_ai/usage.py +0 -0
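
Two modules are brand new in 0.4.7: pydantic_ai/providers/moonshotai.py and pydantic_ai/providers/vercel.py. Their contents aren't shown in this diff, but going by how pydantic-ai wires up its other OpenAI-compatible providers, usage presumably looks like the sketch below (the `MoonshotAIProvider` class name follows the package's naming convention and the model id is illustrative; neither is confirmed by this diff):

```python
# Hypothetical usage of the new MoonshotAI provider; names follow pydantic-ai's
# existing provider conventions and are assumptions, not taken from this diff.
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIModel
from pydantic_ai.providers.moonshotai import MoonshotAIProvider  # assumed class name

model = OpenAIModel(
    'kimi-k2-0711-preview',  # illustrative model id
    provider=MoonshotAIProvider(api_key='your-moonshot-api-key'),
)
agent = Agent(model)
```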

.gitignore
@@ -15,7 +15,6 @@ examples/pydantic_ai_examples/.chat_app_messages.sqlite
 .vscode/
 /question_graph_history.json
 /docs-site/.wrangler/
-/CLAUDE.md
 node_modules/
 **.idea/
 .coverage*

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.4.5
+Version: 0.4.7
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>, Douwe Maan <douwe@pydantic.dev>
 License-Expression: MIT
@@ -30,7 +30,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.4.5
+Requires-Dist: pydantic-graph==0.4.7
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -47,23 +47,23 @@ Requires-Dist: argcomplete>=3.5.0; extra == 'cli'
 Requires-Dist: prompt-toolkit>=3; extra == 'cli'
 Requires-Dist: rich>=13; extra == 'cli'
 Provides-Extra: cohere
-Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == 'cohere'
+Requires-Dist: cohere>=5.16.0; (platform_system != 'Emscripten') and extra == 'cohere'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.4.5; extra == 'evals'
+Requires-Dist: pydantic-evals==0.4.7; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.24.0; extra == 'google'
 Provides-Extra: groq
 Requires-Dist: groq>=0.19.0; extra == 'groq'
 Provides-Extra: huggingface
-Requires-Dist: huggingface-hub[inference]>=0.33.2; extra == 'huggingface'
+Requires-Dist: huggingface-hub[inference]>=0.33.5; extra == 'huggingface'
 Provides-Extra: logfire
 Requires-Dist: logfire>=3.11.0; extra == 'logfire'
 Provides-Extra: mcp
-Requires-Dist: mcp>=1.9.4; (python_version >= '3.10') and extra == 'mcp'
+Requires-Dist: mcp>=1.10.0; (python_version >= '3.10') and extra == 'mcp'
 Provides-Extra: mistral
-Requires-Dist: mistralai>=1.2.5; extra == 'mistral'
+Requires-Dist: mistralai>=1.9.2; extra == 'mistral'
 Provides-Extra: openai
 Requires-Dist: openai>=1.92.0; extra == 'openai'
 Provides-Extra: tavily

pydantic_ai/_function_schema.py
@@ -96,8 +96,13 @@ def function_schema(  # noqa: C901
     config = ConfigDict(title=function.__name__, use_attribute_docstrings=True)
     config_wrapper = ConfigWrapper(config)
     gen_schema = _generate_schema.GenerateSchema(config_wrapper)
+    errors: list[str] = []

-    sig = signature(function)
+    try:
+        sig = signature(function)
+    except ValueError as e:
+        errors.append(str(e))
+        sig = signature(lambda: None)

     type_hints = _typing_extra.get_function_type_hints(function)

@@ -105,7 +110,6 @@ def function_schema(  # noqa: C901
     fields: dict[str, core_schema.TypedDictField] = {}
     positional_fields: list[str] = []
     var_positional_field: str | None = None
-    errors: list[str] = []
     decorators = _decorators.DecoratorInfos()

     description, field_descriptions = doc_descriptions(function, sig, docstring_format=docstring_format)
@@ -235,14 +239,19 @@ def _takes_ctx(function: TargetFunc[P, R]) -> TypeIs[WithCtx[P, R]]:
     Returns:
         `True` if the function takes a `RunContext` as first argument, `False` otherwise.
     """
-    sig = signature(function)
+    try:
+        sig = signature(function)
+    except ValueError:  # pragma: no cover
+        return False  # pragma: no cover
     try:
         first_param_name = next(iter(sig.parameters.keys()))
     except StopIteration:
         return False
     else:
         type_hints = _typing_extra.get_function_type_hints(function)
-        annotation = type_hints[first_param_name]
+        annotation = type_hints.get(first_param_name)
+        if annotation is None:
+            return False  # pragma: no cover
         return annotation is not sig.empty and _is_call_ctx(annotation)


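
Both _function_schema.py hunks guard the same failure mode: `inspect.signature()` raises `ValueError` for callables it cannot introspect (most commonly C builtins without signature metadata). A minimal standalone sketch of the fallback pattern, assuming only the stdlib:

```python
import inspect
from typing import Any, Callable

def safe_signature(function: Callable[..., Any]) -> tuple[inspect.Signature, list[str]]:
    """Mirror the fallback above: record the introspection error and
    substitute an empty signature instead of raising."""
    errors: list[str] = []
    try:
        sig = inspect.signature(function)
    except ValueError as e:  # e.g. a C builtin without __text_signature__
        errors.append(str(e))
        sig = inspect.signature(lambda: None)
    return sig, errors

# min() is a C builtin CPython cannot introspect, so the fallback kicks in:
sig, errors = safe_signature(min)
print(sig, errors)  # -> () ['no signature found for builtin ...']
```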

pydantic_ai/_output.py
@@ -69,12 +69,31 @@ DEFAULT_OUTPUT_TOOL_NAME = 'final_result'
 DEFAULT_OUTPUT_TOOL_DESCRIPTION = 'The final response which ends this conversation'


-async def execute_output_function_with_span(
+async def execute_traced_output_function(
     function_schema: _function_schema.FunctionSchema,
     run_context: RunContext[AgentDepsT],
     args: dict[str, Any] | Any,
+    wrap_validation_errors: bool = True,
 ) -> Any:
-    """Execute a function call within a traced span, automatically recording the response."""
+    """Execute an output function within a traced span with error handling.
+
+    This function executes the output function within an OpenTelemetry span for observability,
+    automatically records the function response, and handles ModelRetry exceptions by converting
+    them to ToolRetryError when wrap_validation_errors is True.
+
+    Args:
+        function_schema: The function schema containing the function to execute
+        run_context: The current run context containing tracing and tool information
+        args: Arguments to pass to the function
+        wrap_validation_errors: If True, wrap ModelRetry exceptions in ToolRetryError
+
+    Returns:
+        The result of the function execution
+
+    Raises:
+        ToolRetryError: When wrap_validation_errors is True and a ModelRetry is caught
+        ModelRetry: When wrap_validation_errors is False and a ModelRetry occurs
+    """
     # Set up span attributes
     tool_name = run_context.tool_name or getattr(function_schema.function, '__name__', 'output_function')
     attributes = {
@@ -96,7 +115,19 @@ async def execute_output_function_with_span(
     )

     with run_context.tracer.start_as_current_span('running output function', attributes=attributes) as span:
-        output = await function_schema.call(args, run_context)
+        try:
+            output = await function_schema.call(args, run_context)
+        except ModelRetry as r:
+            if wrap_validation_errors:
+                m = _messages.RetryPromptPart(
+                    content=r.message,
+                    tool_name=run_context.tool_name,
+                )
+                if run_context.tool_call_id:
+                    m.tool_call_id = run_context.tool_call_id  # pragma: no cover
+                raise ToolRetryError(m) from r
+            else:
+                raise

         # Record response if content inclusion is enabled
         if run_context.trace_include_content and span.is_recording():
@@ -663,16 +694,7 @@ class ObjectOutputProcessor(BaseOutputProcessor[OutputDataT]):
             else:
                 raise

-        try:
-            output = await self.call(output, run_context)
-        except ModelRetry as r:
-            if wrap_validation_errors:
-                m = _messages.RetryPromptPart(
-                    content=r.message,
-                )
-                raise ToolRetryError(m) from r
-            else:
-                raise  # pragma: no cover
+        output = await self.call(output, run_context, wrap_validation_errors)

         return output

@@ -691,12 +713,15 @@ class ObjectOutputProcessor(BaseOutputProcessor[OutputDataT]):
         self,
         output: Any,
         run_context: RunContext[AgentDepsT],
+        wrap_validation_errors: bool = True,
     ):
         if k := self.outer_typed_dict_key:
             output = output[k]

         if self._function_schema:
-            output = await execute_output_function_with_span(self._function_schema, run_context, output)
+            output = await execute_traced_output_function(
+                self._function_schema, run_context, output, wrap_validation_errors
+            )

         return output

@@ -856,16 +881,7 @@ class PlainTextOutputProcessor(BaseOutputProcessor[OutputDataT]):
         wrap_validation_errors: bool = True,
     ) -> OutputDataT:
         args = {self._str_argument_name: data}
-        try:
-            output = await execute_output_function_with_span(self._function_schema, run_context, args)
-        except ModelRetry as r:
-            if wrap_validation_errors:
-                m = _messages.RetryPromptPart(
-                    content=r.message,
-                )
-                raise ToolRetryError(m) from r
-            else:
-                raise  # pragma: no cover
+        output = await execute_traced_output_function(self._function_schema, run_context, args, wrap_validation_errors)

         return cast(OutputDataT, output)

@@ -975,7 +991,7 @@ class OutputToolset(AbstractToolset[AgentDepsT]):
     async def call_tool(
         self, name: str, tool_args: dict[str, Any], ctx: RunContext[AgentDepsT], tool: ToolsetTool[AgentDepsT]
     ) -> Any:
-        output = await self.processors[name].call(tool_args, ctx)
+        output = await self.processors[name].call(tool_args, ctx, wrap_validation_errors=False)
         for validator in self.output_validators:
             output = await validator.validate(output, ctx, wrap_validation_errors=False)
         return output
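
Taken together, the _output.py hunks move all `ModelRetry` handling into `execute_traced_output_function`: `ObjectOutputProcessor` and `PlainTextOutputProcessor` no longer wrap errors themselves, and `OutputToolset.call_tool` opts out via `wrap_validation_errors=False` because its caller does its own wrapping. A self-contained sketch of the pattern, with minimal stand-ins for the real `ModelRetry` and `ToolRetryError` classes:

```python
import asyncio
from typing import Any, Awaitable, Callable

class ModelRetry(Exception):
    """Stand-in: raised by user code to ask the model to try again."""
    def __init__(self, message: str):
        self.message = message
        super().__init__(message)

class ToolRetryError(Exception):
    """Stand-in: carries the retry prompt back into the agent graph."""
    def __init__(self, retry_message: str):
        self.retry_message = retry_message
        super().__init__(retry_message)

async def run_output_function(
    func: Callable[..., Awaitable[Any]],
    args: dict[str, Any],
    *,
    wrap_validation_errors: bool = True,
) -> Any:
    # Single choke point for retry handling, as in execute_traced_output_function:
    # wrap for normal output paths, re-raise for callers that wrap elsewhere.
    try:
        return await func(**args)
    except ModelRetry as r:
        if wrap_validation_errors:
            raise ToolRetryError(r.message) from r
        raise

async def flaky(city: str) -> str:
    raise ModelRetry(f'Unknown city {city!r}, please try a larger city')

try:
    asyncio.run(run_output_function(flaky, {'city': 'Springfield'}))
except ToolRetryError as e:
    print(e.retry_message)
```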

pydantic_ai/_parts_manager.py
@@ -17,6 +17,7 @@ from collections.abc import Hashable
 from dataclasses import dataclass, field, replace
 from typing import Any, Union

+from pydantic_ai._thinking_part import END_THINK_TAG, START_THINK_TAG
 from pydantic_ai.exceptions import UnexpectedModelBehavior
 from pydantic_ai.messages import (
     ModelResponsePart,
@@ -69,9 +70,10 @@ class ModelResponsePartsManager:
     def handle_text_delta(
         self,
         *,
-        vendor_part_id: Hashable | None,
+        vendor_part_id: VendorId | None,
         content: str,
-    ) -> ModelResponseStreamEvent:
+        extract_think_tags: bool = False,
+    ) -> ModelResponseStreamEvent | None:
         """Handle incoming text content, creating or updating a TextPart in the manager as appropriate.

         When `vendor_part_id` is None, the latest part is updated if it exists and is a TextPart;
@@ -83,9 +85,12 @@ class ModelResponsePartsManager:
                 of text. If None, a new part will be created unless the latest part is already
                 a TextPart.
             content: The text content to append to the appropriate TextPart.
+            extract_think_tags: Whether to extract `<think>` tags from the text content and handle them as thinking parts.

         Returns:
-            A `PartStartEvent` if a new part was created, or a `PartDeltaEvent` if an existing part was updated.
+            - A `PartStartEvent` if a new part was created.
+            - A `PartDeltaEvent` if an existing part was updated.
+            - `None` if no new event is emitted (e.g., the first text part was all whitespace).

         Raises:
             UnexpectedModelBehavior: If attempting to apply text content to a part that is not a TextPart.
@@ -104,11 +109,32 @@ class ModelResponsePartsManager:
             part_index = self._vendor_id_to_part_index.get(vendor_part_id)
             if part_index is not None:
                 existing_part = self._parts[part_index]
-                if not isinstance(existing_part, TextPart):
+
+                if extract_think_tags and isinstance(existing_part, ThinkingPart):
+                    # We may be building a thinking part instead of a text part if we had previously seen a `<think>` tag
+                    if content == END_THINK_TAG:
+                        # When we see `</think>`, we're done with the thinking part and the next text delta will need a new part
+                        self._vendor_id_to_part_index.pop(vendor_part_id)
+                        return None
+                    else:
+                        return self.handle_thinking_delta(vendor_part_id=vendor_part_id, content=content)
+                elif isinstance(existing_part, TextPart):
+                    existing_text_part_and_index = existing_part, part_index
+                else:
                     raise UnexpectedModelBehavior(f'Cannot apply a text delta to {existing_part=}')
-                existing_text_part_and_index = existing_part, part_index
+
+        if extract_think_tags and content == START_THINK_TAG:
+            # When we see a `<think>` tag (which is a single token), we'll build a new thinking part instead
+            self._vendor_id_to_part_index.pop(vendor_part_id, None)
+            return self.handle_thinking_delta(vendor_part_id=vendor_part_id, content='')

         if existing_text_part_and_index is None:
+            # If the first text delta is all whitespace, don't emit a new part yet.
+            # This is a workaround for models that emit `<think>\n</think>\n\n` ahead of tool calls (e.g. Ollama + Qwen3),
+            # which we don't want to end up treating as a final result.
+            if content.isspace():
+                return None
+
             # There is no existing text part that should be updated, so create a new one
             new_part_index = len(self._parts)
             part = TextPart(content=content)
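
Given the new `extract_think_tags` flow above, a stream of deltas like `'<think>'`, `'planning...'`, `'</think>'`, `'\n\n'`, `'answer'` should route as follows (a behavioral sketch derived from this hunk; `ModelResponsePartsManager` lives in the private `_parts_manager` module):

```python
from pydantic_ai._parts_manager import ModelResponsePartsManager

manager = ModelResponsePartsManager()
for delta in ['<think>', 'planning...', '</think>', '\n\n', 'answer']:
    event = manager.handle_text_delta(
        vendor_part_id='content',
        content=delta,
        extract_think_tags=True,
    )
    # Expected per the logic above:
    #   '<think>'      -> PartStartEvent for a new ThinkingPart
    #   'planning...'  -> PartDeltaEvent routed to handle_thinking_delta
    #   '</think>'     -> None (the thinking part is closed)
    #   '\n\n'         -> None (the leading-whitespace workaround)
    #   'answer'       -> PartStartEvent for a fresh TextPart
    print(repr(delta), '->', event)
```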

pydantic_ai/ag_ui.py
@@ -291,12 +291,12 @@ class _Adapter(Generic[AgentDepsT, OutputDataT]):
             if isinstance(deps, StateHandler):
                 deps.state = run_input.state

-            history = _History.from_ag_ui(run_input.messages)
+            messages = _messages_from_ag_ui(run_input.messages)

             async with self.agent.iter(
                 user_prompt=None,
                 output_type=[output_type or self.agent.output_type, DeferredToolCalls],
-                message_history=history.messages,
+                message_history=messages,
                 model=model,
                 deps=deps,
                 model_settings=model_settings,
@@ -305,7 +305,7 @@ class _Adapter(Generic[AgentDepsT, OutputDataT]):
                 infer_name=infer_name,
                 toolsets=toolsets,
             ) as run:
-                async for event in self._agent_stream(run, history):
+                async for event in self._agent_stream(run):
                     yield encoder.encode(event)
         except _RunError as e:
             yield encoder.encode(
@@ -327,20 +327,18 @@ class _Adapter(Generic[AgentDepsT, OutputDataT]):
     async def _agent_stream(
         self,
         run: AgentRun[AgentDepsT, Any],
-        history: _History,
     ) -> AsyncGenerator[BaseEvent, None]:
         """Run the agent streaming responses using AG-UI protocol events.

         Args:
             run: The agent run to process.
-            history: The history of messages and tool calls to use for the run.

         Yields:
             AG-UI Server-Sent Events (SSE).
         """
         async for node in run:
+            stream_ctx = _RequestStreamContext()
             if isinstance(node, ModelRequestNode):
-                stream_ctx = _RequestStreamContext()
                 async with node.stream(run.ctx) as request_stream:
                     async for agent_event in request_stream:
                         async for msg in self._handle_model_request_event(stream_ctx, agent_event):
@@ -352,8 +350,8 @@ class _Adapter(Generic[AgentDepsT, OutputDataT]):
             elif isinstance(node, CallToolsNode):
                 async with node.stream(run.ctx) as handle_stream:
                     async for event in handle_stream:
-                        if isinstance(event, FunctionToolResultEvent) and isinstance(event.result, ToolReturnPart):
-                            async for msg in self._handle_tool_result_event(event.result, history.prompt_message_id):
+                        if isinstance(event, FunctionToolResultEvent):
+                            async for msg in self._handle_tool_result_event(stream_ctx, event):
                                 yield msg

     async def _handle_model_request_event(
@@ -382,19 +380,26 @@ class _Adapter(Generic[AgentDepsT, OutputDataT]):
                 yield TextMessageStartEvent(
                     message_id=message_id,
                 )
-                stream_ctx.part_end = TextMessageEndEvent(
-                    message_id=message_id,
-                )
                 if part.content:  # pragma: no branch
                     yield TextMessageContentEvent(
                         message_id=message_id,
                         delta=part.content,
                     )
+                stream_ctx.part_end = TextMessageEndEvent(
+                    message_id=message_id,
+                )
             elif isinstance(part, ToolCallPart):  # pragma: no branch
+                message_id = stream_ctx.message_id or stream_ctx.new_message_id()
                 yield ToolCallStartEvent(
                     tool_call_id=part.tool_call_id,
                     tool_call_name=part.tool_name,
+                    parent_message_id=message_id,
                 )
+                if part.args:
+                    yield ToolCallArgsEvent(
+                        tool_call_id=part.tool_call_id,
+                        delta=part.args if isinstance(part.args, str) else json.dumps(part.args),
+                    )
                 stream_ctx.part_end = ToolCallEndEvent(
                     tool_call_id=part.tool_call_id,
                 )
@@ -407,7 +412,7 @@ class _Adapter(Generic[AgentDepsT, OutputDataT]):
                 # used to indicate the start of thinking.
                 yield ThinkingTextMessageContentEvent(
                     type=EventType.THINKING_TEXT_MESSAGE_CONTENT,
-                    delta=part.content or '',
+                    delta=part.content,
                 )
                 stream_ctx.part_end = ThinkingTextMessageEndEvent(
                     type=EventType.THINKING_TEXT_MESSAGE_END,
@@ -435,20 +440,25 @@ class _Adapter(Generic[AgentDepsT, OutputDataT]):

     async def _handle_tool_result_event(
         self,
-        result: ToolReturnPart,
-        prompt_message_id: str,
+        stream_ctx: _RequestStreamContext,
+        event: FunctionToolResultEvent,
     ) -> AsyncGenerator[BaseEvent, None]:
         """Convert a tool call result to AG-UI events.

         Args:
-            result: The tool call result to process.
-            prompt_message_id: The message ID of the prompt that initiated the tool call.
+            stream_ctx: The request stream context to manage state.
+            event: The tool call result event to process.

         Yields:
             AG-UI Server-Sent Events (SSE).
         """
+        result = event.result
+        if not isinstance(result, ToolReturnPart):
+            return
+
+        message_id = stream_ctx.new_message_id()
         yield ToolCallResultEvent(
-            message_id=prompt_message_id,
+            message_id=message_id,
             type=EventType.TOOL_CALL_RESULT,
             role='tool',
             tool_call_id=result.tool_call_id,
@@ -468,75 +478,55 @@ class _Adapter(Generic[AgentDepsT, OutputDataT]):
             yield item


-@dataclass
-class _History:
-    """A simple history representation for AG-UI protocol."""
-
-    prompt_message_id: str  # The ID of the last user message.
-    messages: list[ModelMessage]
-
-    @classmethod
-    def from_ag_ui(cls, messages: list[Message]) -> _History:
-        """Convert a AG-UI history to a Pydantic AI one.
-
-        Args:
-            messages: List of AG-UI messages to convert.
-
-        Returns:
-            List of Pydantic AI model messages.
-        """
-        prompt_message_id = ''
-        result: list[ModelMessage] = []
-        tool_calls: dict[str, str] = {}  # Tool call ID to tool name mapping.
-        for msg in messages:
-            if isinstance(msg, UserMessage):
-                prompt_message_id = msg.id
-                result.append(ModelRequest(parts=[UserPromptPart(content=msg.content)]))
-            elif isinstance(msg, AssistantMessage):
-                if msg.tool_calls:
-                    for tool_call in msg.tool_calls:
-                        tool_calls[tool_call.id] = tool_call.function.name
-
-                    result.append(
-                        ModelResponse(
-                            parts=[
-                                ToolCallPart(
-                                    tool_name=tool_call.function.name,
-                                    tool_call_id=tool_call.id,
-                                    args=tool_call.function.arguments,
-                                )
-                                for tool_call in msg.tool_calls
-                            ]
-                        )
-                    )
-
-                if msg.content:
-                    result.append(ModelResponse(parts=[TextPart(content=msg.content)]))
-            elif isinstance(msg, SystemMessage):
-                result.append(ModelRequest(parts=[SystemPromptPart(content=msg.content)]))
-            elif isinstance(msg, ToolMessage):
-                tool_name = tool_calls.get(msg.tool_call_id)
-                if tool_name is None:  # pragma: no cover
-                    raise _ToolCallNotFoundError(tool_call_id=msg.tool_call_id)
-
-                result.append(
-                    ModelRequest(
-                        parts=[
-                            ToolReturnPart(
-                                tool_name=tool_name,
-                                content=msg.content,
-                                tool_call_id=msg.tool_call_id,
-                            )
-                        ]
-                    )
-                )
-            elif isinstance(msg, DeveloperMessage):  # pragma: no branch
-                result.append(ModelRequest(parts=[SystemPromptPart(content=msg.content)]))
-
-        return cls(
-            prompt_message_id=prompt_message_id,
-            messages=result,
-        )
+def _messages_from_ag_ui(messages: list[Message]) -> list[ModelMessage]:
+    """Convert a AG-UI history to a Pydantic AI one."""
+    result: list[ModelMessage] = []
+    tool_calls: dict[str, str] = {}  # Tool call ID to tool name mapping.
+    for msg in messages:
+        if isinstance(msg, UserMessage):
+            result.append(ModelRequest(parts=[UserPromptPart(content=msg.content)]))
+        elif isinstance(msg, AssistantMessage):
+            if msg.tool_calls:
+                for tool_call in msg.tool_calls:
+                    tool_calls[tool_call.id] = tool_call.function.name
+
+                result.append(
+                    ModelResponse(
+                        parts=[
+                            ToolCallPart(
+                                tool_name=tool_call.function.name,
+                                tool_call_id=tool_call.id,
+                                args=tool_call.function.arguments,
+                            )
+                            for tool_call in msg.tool_calls
+                        ]
+                    )
+                )
+
+            if msg.content:
+                result.append(ModelResponse(parts=[TextPart(content=msg.content)]))
+        elif isinstance(msg, SystemMessage):
+            result.append(ModelRequest(parts=[SystemPromptPart(content=msg.content)]))
+        elif isinstance(msg, ToolMessage):
+            tool_name = tool_calls.get(msg.tool_call_id)
+            if tool_name is None:  # pragma: no cover
+                raise _ToolCallNotFoundError(tool_call_id=msg.tool_call_id)
+
+            result.append(
+                ModelRequest(
+                    parts=[
+                        ToolReturnPart(
+                            tool_name=tool_name,
+                            content=msg.content,
+                            tool_call_id=msg.tool_call_id,
+                        )
+                    ]
+                )
+            )
+        elif isinstance(msg, DeveloperMessage):  # pragma: no branch
+            result.append(ModelRequest(parts=[SystemPromptPart(content=msg.content)]))
+
+    return result


 @runtime_checkable
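
The flattened converter drops the `prompt_message_id` bookkeeping (tool-result events now mint their own message IDs via `_RequestStreamContext`) and otherwise preserves the role mapping, which is worth spelling out:

```python
# Role mapping performed by _messages_from_ag_ui, per the hunk above:
#
#   UserMessage      -> ModelRequest([UserPromptPart(content)])
#   AssistantMessage -> ModelResponse([ToolCallPart(...) for each tool call]),
#                       plus ModelResponse([TextPart(content)]) when content is set
#   SystemMessage    -> ModelRequest([SystemPromptPart(content)])
#   ToolMessage      -> ModelRequest([ToolReturnPart(...)]), with the tool name
#                       resolved via the tool_call_id -> tool_name mapping
#   DeveloperMessage -> ModelRequest([SystemPromptPart(content)])
```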

pydantic_ai/agent.py
@@ -36,7 +36,7 @@ from ._tool_manager import ToolManager
 from .models.instrumented import InstrumentationSettings, InstrumentedModel, instrument_model
 from .output import OutputDataT, OutputSpec
 from .profiles import ModelProfile
-from .result import FinalResult, StreamedRunResult
+from .result import AgentStream, FinalResult, StreamedRunResult
 from .settings import ModelSettings, merge_model_settings
 from .tools import (
     AgentDepsT,
@@ -1127,29 +1127,15 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             while True:
                 if self.is_model_request_node(node):
                     graph_ctx = agent_run.ctx
-                    async with node._stream(graph_ctx) as streamed_response:  # pyright: ignore[reportPrivateUsage]
-
-                        async def stream_to_final(
-                            s: models.StreamedResponse,
-                        ) -> FinalResult[models.StreamedResponse] | None:
-                            output_schema = graph_ctx.deps.output_schema
-                            async for maybe_part_event in streamed_response:
-                                if isinstance(maybe_part_event, _messages.PartStartEvent):
-                                    new_part = maybe_part_event.part
-                                    if isinstance(new_part, _messages.TextPart) and isinstance(
-                                        output_schema, _output.TextOutputSchema
-                                    ):
-                                        return FinalResult(s, None, None)
-                                    elif isinstance(new_part, _messages.ToolCallPart) and (
-                                        tool_def := graph_ctx.deps.tool_manager.get_tool_def(new_part.tool_name)
-                                    ):
-                                        if tool_def.kind == 'output':
-                                            return FinalResult(s, new_part.tool_name, new_part.tool_call_id)
-                                        elif tool_def.kind == 'deferred':
-                                            return FinalResult(s, None, None)
+                    async with node.stream(graph_ctx) as stream:
+
+                        async def stream_to_final(s: AgentStream) -> FinalResult[AgentStream] | None:
+                            async for event in stream:
+                                if isinstance(event, _messages.FinalResultEvent):
+                                    return FinalResult(s, event.tool_name, event.tool_call_id)
                             return None

-                        final_result = await stream_to_final(streamed_response)
+                        final_result = await stream_to_final(stream)
                         if final_result is not None:
                             if yielded:
                                 raise exceptions.AgentRunError('Agent run produced final results')  # pragma: no cover
@@ -1184,14 +1170,8 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
                             yield StreamedRunResult(
                                 messages,
                                 graph_ctx.deps.new_message_index,
-                                graph_ctx.deps.usage_limits,
-                                streamed_response,
-                                graph_ctx.deps.output_schema,
-                                _agent_graph.build_run_context(graph_ctx),
-                                graph_ctx.deps.output_validators,
-                                final_result.tool_name,
+                                stream,
                                 on_complete,
-                                graph_ctx.deps.tool_manager,
                             )
                             break
                 next_node = await agent_run.next(node)
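
Despite the internal rewrite to `AgentStream` and `FinalResultEvent`, the public `run_stream` surface is unchanged; a typical caller still looks like this (standard pydantic-ai usage, with an illustrative model id):

```python
import asyncio

from pydantic_ai import Agent

agent = Agent('openai:gpt-4o')  # illustrative model id

async def main() -> None:
    async with agent.run_stream('What is the capital of France?') as result:
        # `result` is still a StreamedRunResult; only its construction
        # (now from an AgentStream) changed in this release.
        async for text in result.stream_text():
            print(text)

asyncio.run(main())
```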