pydantic-ai-slim 1.0.13__tar.gz → 1.0.15__tar.gz

This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Files changed (128):
  1. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/PKG-INFO +3 -3
  2. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/__init__.py +19 -1
  3. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/_agent_graph.py +118 -97
  4. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/_cli.py +4 -7
  5. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/_output.py +236 -192
  6. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/_parts_manager.py +8 -42
  7. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/_tool_manager.py +9 -16
  8. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/agent/abstract.py +169 -1
  9. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/builtin_tools.py +82 -0
  10. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/direct.py +7 -0
  11. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/dbos/_agent.py +106 -3
  12. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/temporal/_agent.py +123 -6
  13. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/temporal/_model.py +8 -0
  14. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/format_prompt.py +4 -3
  15. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/mcp.py +20 -10
  16. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/messages.py +149 -3
  17. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/__init__.py +15 -1
  18. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/anthropic.py +7 -3
  19. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/cohere.py +4 -0
  20. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/function.py +7 -4
  21. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/gemini.py +8 -0
  22. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/google.py +56 -23
  23. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/groq.py +11 -5
  24. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/huggingface.py +5 -3
  25. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/mistral.py +6 -8
  26. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/openai.py +197 -58
  27. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/test.py +4 -0
  28. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/output.py +5 -2
  29. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/__init__.py +2 -0
  30. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/google.py +5 -2
  31. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/openai.py +2 -1
  32. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/result.py +46 -30
  33. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/run.py +35 -7
  34. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/usage.py +5 -4
  35. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/.gitignore +0 -0
  36. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/LICENSE +0 -0
  37. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/README.md +0 -0
  38. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/__main__.py +0 -0
  39. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/_a2a.py +0 -0
  40. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/_function_schema.py +0 -0
  41. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/_griffe.py +0 -0
  42. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/_instrumentation.py +0 -0
  43. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/_json_schema.py +0 -0
  44. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/_mcp.py +0 -0
  45. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/_otel_messages.py +0 -0
  46. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/_run_context.py +0 -0
  47. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/_system_prompt.py +0 -0
  48. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/_thinking_part.py +0 -0
  49. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/_utils.py +0 -0
  50. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/ag_ui.py +0 -0
  51. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/agent/__init__.py +0 -0
  52. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/agent/wrapper.py +0 -0
  53. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/common_tools/__init__.py +0 -0
  54. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  55. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/common_tools/tavily.py +0 -0
  56. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/__init__.py +0 -0
  57. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/dbos/__init__.py +0 -0
  58. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/dbos/_mcp_server.py +0 -0
  59. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/dbos/_model.py +0 -0
  60. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/dbos/_utils.py +0 -0
  61. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/temporal/__init__.py +0 -0
  62. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/temporal/_function_toolset.py +0 -0
  63. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/temporal/_logfire.py +0 -0
  64. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/temporal/_mcp_server.py +0 -0
  65. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/temporal/_run_context.py +0 -0
  66. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/temporal/_toolset.py +0 -0
  67. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/exceptions.py +0 -0
  68. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/ext/__init__.py +0 -0
  69. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/ext/aci.py +0 -0
  70. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/ext/langchain.py +0 -0
  71. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/bedrock.py +0 -0
  72. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/fallback.py +0 -0
  73. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/instrumented.py +0 -0
  74. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/mcp_sampling.py +0 -0
  75. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/wrapper.py +0 -0
  76. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/amazon.py +0 -0
  77. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/anthropic.py +0 -0
  78. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/cohere.py +0 -0
  79. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/deepseek.py +0 -0
  80. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/grok.py +0 -0
  81. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/groq.py +0 -0
  82. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/harmony.py +0 -0
  83. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/meta.py +0 -0
  84. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/mistral.py +0 -0
  85. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/moonshotai.py +0 -0
  86. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/qwen.py +0 -0
  87. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/__init__.py +0 -0
  88. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/anthropic.py +0 -0
  89. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/azure.py +0 -0
  90. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/bedrock.py +0 -0
  91. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/cerebras.py +0 -0
  92. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/cohere.py +0 -0
  93. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/deepseek.py +0 -0
  94. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/fireworks.py +0 -0
  95. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/gateway.py +0 -0
  96. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/github.py +0 -0
  97. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/google.py +0 -0
  98. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/google_gla.py +0 -0
  99. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/google_vertex.py +0 -0
  100. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/grok.py +0 -0
  101. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/groq.py +0 -0
  102. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/heroku.py +0 -0
  103. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/huggingface.py +0 -0
  104. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/litellm.py +0 -0
  105. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/mistral.py +0 -0
  106. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/moonshotai.py +0 -0
  107. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/ollama.py +0 -0
  108. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/openai.py +0 -0
  109. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/openrouter.py +0 -0
  110. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/together.py +0 -0
  111. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/vercel.py +0 -0
  112. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/py.typed +0 -0
  113. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/retries.py +0 -0
  114. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/settings.py +0 -0
  115. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/tools.py +0 -0
  116. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/__init__.py +0 -0
  117. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/_dynamic.py +0 -0
  118. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/abstract.py +0 -0
  119. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/approval_required.py +0 -0
  120. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/combined.py +0 -0
  121. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/external.py +0 -0
  122. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/filtered.py +0 -0
  123. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/function.py +0 -0
  124. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/prefixed.py +0 -0
  125. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/prepared.py +0 -0
  126. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/renamed.py +0 -0
  127. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/wrapper.py +0 -0
  128. {pydantic_ai_slim-1.0.13 → pydantic_ai_slim-1.0.15}/pyproject.toml +0 -0
PKG-INFO (+3 -3)

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 1.0.13
+Version: 1.0.15
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -33,7 +33,7 @@ Requires-Dist: genai-prices>=0.0.28
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==1.0.13
+Requires-Dist: pydantic-graph==1.0.15
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -57,7 +57,7 @@ Requires-Dist: dbos>=1.14.0; extra == 'dbos'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==1.0.13; extra == 'evals'
+Requires-Dist: pydantic-evals==1.0.15; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq
pydantic_ai/__init__.py (+19 -1)

@@ -9,7 +9,14 @@ from .agent import (
     UserPromptNode,
     capture_run_messages,
 )
-from .builtin_tools import CodeExecutionTool, UrlContextTool, WebSearchTool, WebSearchUserLocation
+from .builtin_tools import (
+    CodeExecutionTool,
+    ImageGenerationTool,
+    MemoryTool,
+    UrlContextTool,
+    WebSearchTool,
+    WebSearchUserLocation,
+)
 from .exceptions import (
     AgentRunError,
     ApprovalRequired,
@@ -30,11 +37,13 @@ from .messages import (
     BaseToolCallPart,
     BaseToolReturnPart,
     BinaryContent,
+    BinaryImage,
     BuiltinToolCallPart,
     BuiltinToolReturnPart,
     DocumentFormat,
     DocumentMediaType,
     DocumentUrl,
+    FilePart,
     FileUrl,
     FinalResultEvent,
     FinishReason,
@@ -79,6 +88,7 @@ from .profiles import (
     ModelProfile,
     ModelProfileSpec,
 )
+from .run import AgentRun, AgentRunResult, AgentRunResultEvent
 from .settings import ModelSettings
 from .tools import DeferredToolRequests, DeferredToolResults, RunContext, Tool, ToolApproved, ToolDefinition, ToolDenied
 from .toolsets import (
@@ -131,6 +141,7 @@ __all__ = (
     'DocumentMediaType',
     'DocumentUrl',
     'FileUrl',
+    'FilePart',
     'FinalResultEvent',
     'FinishReason',
     'FunctionToolCallEvent',
@@ -139,6 +150,7 @@ __all__ = (
     'ImageFormat',
     'ImageMediaType',
     'ImageUrl',
+    'BinaryImage',
     'ModelMessage',
     'ModelMessagesTypeAdapter',
     'ModelRequest',
@@ -197,6 +209,8 @@ __all__ = (
     'WebSearchUserLocation',
     'UrlContextTool',
     'CodeExecutionTool',
+    'ImageGenerationTool',
+    'MemoryTool',
     # output
     'ToolOutput',
     'NativeOutput',
@@ -211,5 +225,9 @@ __all__ = (
     'RunUsage',
     'RequestUsage',
     'UsageLimits',
+    # run
+    'AgentRun',
+    'AgentRunResult',
+    'AgentRunResultEvent',
 )
 __version__ = _metadata_version('pydantic_ai_slim')
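
The upshot of these `__init__.py` changes: `ImageGenerationTool`, `MemoryTool`, `BinaryImage`, `FilePart`, and the `AgentRun*` run types are now part of the package's public root namespace. A minimal sketch of the new imports, assuming pydantic-ai-slim 1.0.15 is installed (names taken directly from the `__all__` additions above):

    # Sketch: all of these should now import from the package root in 1.0.15.
    from pydantic_ai import (
        AgentRun,             # re-exported from pydantic_ai.run
        AgentRunResult,
        AgentRunResultEvent,
        BinaryImage,          # re-exported from pydantic_ai.messages
        FilePart,
        ImageGenerationTool,  # re-exported from pydantic_ai.builtin_tools
        MemoryTool,
    )
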
pydantic_ai/_agent_graph.py (+118 -97)

@@ -87,10 +87,10 @@ Can optionally accept a `RunContext` as a parameter.
 class GraphAgentState:
     """State kept across the execution of the agent graph."""

-    message_history: list[_messages.ModelMessage]
-    usage: _usage.RunUsage
-    retries: int
-    run_step: int
+    message_history: list[_messages.ModelMessage] = dataclasses.field(default_factory=list)
+    usage: _usage.RunUsage = dataclasses.field(default_factory=_usage.RunUsage)
+    retries: int = 0
+    run_step: int = 0

     def increment_retries(self, max_result_retries: int, error: BaseException | None = None) -> None:
         self.retries += 1
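
Giving every `GraphAgentState` field a default means the state can now be constructed empty, and `default_factory` guarantees each instance gets its own list and `RunUsage` rather than a shared mutable default. A generic illustration of the pattern (standard library only, not pydantic-ai code):

    import dataclasses

    @dataclasses.dataclass
    class State:
        history: list[str] = dataclasses.field(default_factory=list)  # fresh list per instance
        retries: int = 0

    a, b = State(), State()
    a.history.append('x')
    assert b.history == []  # mutating one instance leaves the other untouched
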
@@ -222,7 +222,7 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
         if self.user_prompt is None:
             # Skip ModelRequestNode and go directly to CallToolsNode
             return CallToolsNode[DepsT, NodeRunEndT](last_message)
-        elif any(isinstance(part, _messages.ToolCallPart) for part in last_message.parts):
+        elif last_message.tool_calls:
             raise exceptions.UserError(
                 'Cannot provide a new user prompt when the message history contains unprocessed tool calls.'
             )
@@ -230,7 +230,6 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
         # Build the run context after `ctx.deps.prompt` has been updated
         run_context = build_run_context(ctx)

-        parts: list[_messages.ModelRequestPart] = []
         if messages:
             await self._reevaluate_dynamic_prompts(messages, run_context)

@@ -272,7 +271,7 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
             raise exceptions.UserError(
                 'Tool call results were provided, but the message history does not contain a `ModelResponse`.'
             )
-        if not any(isinstance(part, _messages.ToolCallPart) for part in last_model_response.parts):
+        if not last_model_response.tool_calls:
             raise exceptions.UserError(
                 'Tool call results were provided, but the message history does not contain any unprocessed tool calls.'
             )
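
Both call sites swap a manual scan over `parts` for `last_message.tool_calls` / `last_model_response.tool_calls`, presumably a convenience property added to `ModelResponse` in this release (`messages.py` changed by +149/-3 but is not shown in full here). A hedged sketch of the equivalence, with `response` standing in for any `ModelResponse`:

    from pydantic_ai.messages import ToolCallPart

    # Assumption: ModelResponse.tool_calls yields the response's ToolCallParts.
    old_style = any(isinstance(p, ToolCallPart) for p in response.parts)
    new_style = bool(response.tool_calls)  # same truth value, per the rewrite above
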
@@ -356,9 +355,6 @@ async def _prepare_request_parameters(
     if isinstance(output_schema, _output.NativeOutputSchema):
         output_object = output_schema.object_def

-    # ToolOrTextOutputSchema, NativeOutputSchema, and PromptedOutputSchema all inherit from TextOutputSchema
-    allow_text_output = isinstance(output_schema, _output.TextOutputSchema)
-
     function_tools: list[ToolDefinition] = []
     output_tools: list[ToolDefinition] = []
     for tool_def in ctx.deps.tool_manager.tool_defs:
@@ -373,7 +369,8 @@
         output_mode=output_schema.mode,
         output_tools=output_tools,
         output_object=output_object,
-        allow_text_output=allow_text_output,
+        allow_text_output=output_schema.allows_text,
+        allow_image_output=output_schema.allows_image,
     )

@@ -458,15 +455,13 @@ class ModelRequestNode(AgentNode[DepsT, NodeRunEndT]):

         original_history = ctx.state.message_history[:]
         message_history = await _process_message_history(original_history, ctx.deps.history_processors, run_context)
-        # Never merge the new `ModelRequest` with the one preceding it, to keep `new_messages()` from accidentally including part of the existing message history
-        message_history = [*_clean_message_history(message_history[:-1]), message_history[-1]]
         # `ctx.state.message_history` is the same list used by `capture_run_messages`, so we should replace its contents, not the reference
         ctx.state.message_history[:] = message_history
         # Update the new message index to ensure `result.new_messages()` returns the correct messages
         ctx.deps.new_message_index -= len(original_history) - len(message_history)

-        # Do one more cleaning pass to merge possible consecutive trailing `ModelRequest`s into one, with tool call parts before user parts,
-        # but don't store it in the message history on state.
+        # Merge possible consecutive trailing `ModelRequest`s into one, with tool call parts before user parts,
+        # but don't store it in the message history on state. This is just for the benefit of model classes that want clear user/assistant boundaries.
         # See `tests/test_tools.py::test_parallel_tool_return_with_deferred` for an example where this is necessary
         message_history = _clean_message_history(message_history)

@@ -545,27 +540,58 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
         if self._events_iterator is None:
             # Ensure that the stream is only run once

+            output_schema = ctx.deps.output_schema
+
             async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]:  # noqa: C901
+                if not self.model_response.parts:
+                    # we got an empty response.
+                    # this sometimes happens with anthropic (and perhaps other models)
+                    # when the model has already returned text along side tool calls
+                    if text_processor := output_schema.text_processor:
+                        # in this scenario, if text responses are allowed, we return text from the most recent model
+                        # response, if any
+                        for message in reversed(ctx.state.message_history):
+                            if isinstance(message, _messages.ModelResponse):
+                                text = ''
+                                for part in message.parts:
+                                    if isinstance(part, _messages.TextPart):
+                                        text += part.content
+                                    elif isinstance(part, _messages.BuiltinToolCallPart):
+                                        # Text parts before a built-in tool call are essentially thoughts,
+                                        # not part of the final result output, so we reset the accumulated text
+                                        text = ''  # pragma: no cover
+                                if text:
+                                    self._next_node = await self._handle_text_response(ctx, text, text_processor)
+                                    return
+
+                    # Go back to the model request node with an empty request, which means we'll essentially
+                    # resubmit the most recent request that resulted in an empty response,
+                    # as the empty response and request will not create any items in the API payload,
+                    # in the hope the model will return a non-empty response this time.
+                    ctx.state.increment_retries(ctx.deps.max_result_retries)
+                    self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[]))
+                    return
+
                 text = ''
                 tool_calls: list[_messages.ToolCallPart] = []
-                invisible_parts: bool = False
+                files: list[_messages.BinaryContent] = []

                 for part in self.model_response.parts:
                     if isinstance(part, _messages.TextPart):
                         text += part.content
                     elif isinstance(part, _messages.ToolCallPart):
                         tool_calls.append(part)
+                    elif isinstance(part, _messages.FilePart):
+                        files.append(part.content)
                     elif isinstance(part, _messages.BuiltinToolCallPart):
                         # Text parts before a built-in tool call are essentially thoughts,
                         # not part of the final result output, so we reset the accumulated text
                         text = ''
-                        invisible_parts = True
                         yield _messages.BuiltinToolCallEvent(part)  # pyright: ignore[reportDeprecated]
                     elif isinstance(part, _messages.BuiltinToolReturnPart):
-                        invisible_parts = True
                         yield _messages.BuiltinToolResultEvent(part)  # pyright: ignore[reportDeprecated]
                     elif isinstance(part, _messages.ThinkingPart):
-                        invisible_parts = True
+                        pass
                     else:
                         assert_never(part)

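The early-exit path above treats any text that preceded a built-in tool call as a thought rather than final output, resetting the accumulator whenever such a call appears; the same rule shows up again in the next hunk. Distilled into a standalone sketch (simplified and illustrative, not the library's own helper):

    from pydantic_ai.messages import BuiltinToolCallPart, TextPart

    def visible_text(parts) -> str:
        """Concatenate TextParts, discarding text that preceded a built-in tool call."""
        text = ''
        for part in parts:
            if isinstance(part, TextPart):
                text += part.content
            elif isinstance(part, BuiltinToolCallPart):
                text = ''  # everything so far was a thought, not final output
        return text
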
@@ -574,47 +600,35 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
                 # This accounts for cases like anthropic returns that might contain a text response
                 # and a tool call response, where the text response just indicates the tool call will happen.
                 try:
+                    alternatives: list[str] = []
                     if tool_calls:
                         async for event in self._handle_tool_calls(ctx, tool_calls):
                             yield event
-                    elif text:
-                        # No events are emitted during the handling of text responses, so we don't need to yield anything
-                        self._next_node = await self._handle_text_response(ctx, text)
-                    elif invisible_parts:
-                        # handle responses with only thinking or built-in tool parts.
-                        # this can happen with models that support thinking mode when they don't provide
-                        # actionable output alongside their thinking content. so we tell the model to try again.
-                        m = _messages.RetryPromptPart(
-                            content='Responses without text or tool calls are not permitted.',
-                        )
-                        raise ToolRetryError(m)
+                        return
+                    elif output_schema.toolset:
+                        alternatives.append('include your response in a tool call')
                     else:
-                        # we got an empty response with no tool calls, text, thinking, or built-in tool calls.
-                        # this sometimes happens with anthropic (and perhaps other models)
-                        # when the model has already returned text along side tool calls
-                        # in this scenario, if text responses are allowed, we return text from the most recent model
-                        # response, if any
-                        if isinstance(ctx.deps.output_schema, _output.TextOutputSchema):
-                            for message in reversed(ctx.state.message_history):
-                                if isinstance(message, _messages.ModelResponse):
-                                    text = ''
-                                    for part in message.parts:
-                                        if isinstance(part, _messages.TextPart):
-                                            text += part.content
-                                        elif isinstance(part, _messages.BuiltinToolCallPart):
-                                            # Text parts before a built-in tool call are essentially thoughts,
-                                            # not part of the final result output, so we reset the accumulated text
-                                            text = ''  # pragma: no cover
-                                    if text:
-                                        self._next_node = await self._handle_text_response(ctx, text)
-                                        return
-
-                        # Go back to the model request node with an empty request, which means we'll essentially
-                        # resubmit the most recent request that resulted in an empty response,
-                        # as the empty response and request will not create any items in the API payload,
-                        # in the hope the model will return a non-empty response this time.
-                        ctx.state.increment_retries(ctx.deps.max_result_retries)
-                        self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[]))
+                        alternatives.append('call a tool')
+
+                    if output_schema.allows_image:
+                        if image := next((file for file in files if isinstance(file, _messages.BinaryImage)), None):
+                            self._next_node = await self._handle_image_response(ctx, image)
+                            return
+                        alternatives.append('return an image')
+
+                    if text_processor := output_schema.text_processor:
+                        if text:
+                            self._next_node = await self._handle_text_response(ctx, text, text_processor)
+                            return
+                        alternatives.insert(0, 'return text')
+
+                    # handle responses with only parts that don't constitute output.
+                    # This can happen with models that support thinking mode when they don't provide
+                    # actionable output alongside their thinking content. so we tell the model to try again.
+                    m = _messages.RetryPromptPart(
+                        content=f'Please {" or ".join(alternatives)}.',
+                    )
+                    raise ToolRetryError(m)
                 except ToolRetryError as e:
                     ctx.state.increment_retries(ctx.deps.max_result_retries, e)
                     self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[e.tool_retry]))
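
Instead of a fixed complaint, the retry prompt is now assembled from whichever alternatives the output schema actually supports. For a plain text-output agent with no output toolset, the branches above would trace roughly as follows (illustrative walk-through, not library code):

    alternatives = ['call a tool']         # no output toolset, so the else branch fires
    alternatives.insert(0, 'return text')  # text is allowed but none was produced
    f'Please {" or ".join(alternatives)}.'
    # -> 'Please return text or call a tool.'
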
@@ -657,6 +671,28 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
             _messages.ModelRequest(parts=output_parts, instructions=instructions)
         )

+    async def _handle_text_response(
+        self,
+        ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
+        text: str,
+        text_processor: _output.BaseOutputProcessor[NodeRunEndT],
+    ) -> ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]]:
+        run_context = build_run_context(ctx)
+
+        result_data = await text_processor.process(text, run_context)
+
+        for validator in ctx.deps.output_validators:
+            result_data = await validator.validate(result_data, run_context)
+        return self._handle_final_result(ctx, result.FinalResult(result_data), [])
+
+    async def _handle_image_response(
+        self,
+        ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
+        image: _messages.BinaryImage,
+    ) -> ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]]:
+        result_data = cast(NodeRunEndT, image)
+        return self._handle_final_result(ctx, result.FinalResult(result_data), [])
+
     def _handle_final_result(
         self,
         ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
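
The new image path, together with the `ImageGenerationTool` and `BinaryImage` exports, points at agents whose final output is an image. A speculative usage sketch, assuming `BinaryImage` is accepted as an `output_type` (the `allows_image` branch above implies it) and that the chosen model supports image generation; the model name is illustrative:

    from pydantic_ai import Agent, BinaryImage, ImageGenerationTool

    agent = Agent(
        'openai:gpt-5',  # assumed: any model with image-generation support
        output_type=BinaryImage,
        builtin_tools=[ImageGenerationTool()],
    )
    # result = agent.run_sync('draw a robot')
    # result.output would then be a BinaryImage rather than text
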
@@ -671,26 +707,6 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):

         return End(final_result)

-    async def _handle_text_response(
-        self,
-        ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
-        text: str,
-    ) -> ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]]:
-        output_schema = ctx.deps.output_schema
-        run_context = build_run_context(ctx)
-
-        if isinstance(output_schema, _output.TextOutputSchema):
-            result_data = await output_schema.process(text, run_context)
-        else:
-            m = _messages.RetryPromptPart(
-                content='Plain text responses are not permitted, please include your response in a tool call',
-            )
-            raise ToolRetryError(m)
-
-        for validator in ctx.deps.output_validators:
-            result_data = await validator.validate(result_data, run_context)
-        return self._handle_final_result(ctx, result.FinalResult(result_data), [])
-
     __repr__ = dataclasses_no_defaults_repr

@@ -823,6 +839,7 @@ async def process_tool_calls(  # noqa: C901
         tool_calls=calls_to_run,
         tool_call_results=calls_to_run_results,
         tracer=ctx.deps.tracer,
+        usage=ctx.state.usage,
        usage_limits=ctx.deps.usage_limits,
         output_parts=output_parts,
         output_deferred_calls=deferred_calls,
@@ -869,7 +886,8 @@ async def _call_tools(
     tool_calls: list[_messages.ToolCallPart],
     tool_call_results: dict[str, DeferredToolResult],
     tracer: Tracer,
-    usage_limits: _usage.UsageLimits | None,
+    usage: _usage.RunUsage,
+    usage_limits: _usage.UsageLimits,
     output_parts: list[_messages.ModelRequestPart],
     output_deferred_calls: dict[Literal['external', 'unapproved'], list[_messages.ToolCallPart]],
 ) -> AsyncIterator[_messages.HandleResponseEvent]:
@@ -877,6 +895,11 @@ async def _call_tools(
     user_parts_by_index: dict[int, _messages.UserPromptPart] = {}
     deferred_calls_by_index: dict[int, Literal['external', 'unapproved']] = {}

+    if usage_limits.tool_calls_limit is not None:
+        projected_usage = deepcopy(usage)
+        projected_usage.tool_calls += len(tool_calls)
+        usage_limits.check_before_tool_call(projected_usage)
+
     for call in tool_calls:
         yield _messages.FunctionToolCallEvent(call)

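Projecting the batch size before any call runs means a parallel batch that would exceed `tool_calls_limit` is rejected up front rather than failing midway. A hedged usage sketch (`tool_calls_limit` and `check_before_tool_call` appear in this diff; the run API shown is the standard one, and the model name is illustrative):

    from pydantic_ai import Agent, UsageLimits

    agent = Agent('openai:gpt-4o')
    result = agent.run_sync(
        'look up these three things',
        usage_limits=UsageLimits(tool_calls_limit=5),  # whole batch checked before any tool runs
    )
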
@@ -890,13 +913,19 @@ async def _call_tools(

     async def handle_call_or_result(
         coro_or_task: Awaitable[
-            tuple[_messages.ToolReturnPart | _messages.RetryPromptPart, _messages.UserPromptPart | None]
+            tuple[
+                _messages.ToolReturnPart | _messages.RetryPromptPart, str | Sequence[_messages.UserContent] | None
+            ]
         ]
-        | Task[tuple[_messages.ToolReturnPart | _messages.RetryPromptPart, _messages.UserPromptPart | None]],
+        | Task[
+            tuple[
+                _messages.ToolReturnPart | _messages.RetryPromptPart, str | Sequence[_messages.UserContent] | None
+            ]
+        ],
         index: int,
     ) -> _messages.HandleResponseEvent | None:
         try:
-            tool_part, tool_user_part = (
+            tool_part, tool_user_content = (
                 (await coro_or_task) if inspect.isawaitable(coro_or_task) else coro_or_task.result()
             )
         except exceptions.CallDeferred:
@@ -905,15 +934,15 @@ async def _call_tools(
             deferred_calls_by_index[index] = 'unapproved'
         else:
             tool_parts_by_index[index] = tool_part
-            if tool_user_part:
-                user_parts_by_index[index] = tool_user_part
+            if tool_user_content:
+                user_parts_by_index[index] = _messages.UserPromptPart(content=tool_user_content)

-        return _messages.FunctionToolResultEvent(tool_part)
+        return _messages.FunctionToolResultEvent(tool_part, content=tool_user_content)

     if tool_manager.should_call_sequentially(tool_calls):
         for index, call in enumerate(tool_calls):
             if event := await handle_call_or_result(
-                _call_tool(tool_manager, call, tool_call_results.get(call.tool_call_id), usage_limits),
+                _call_tool(tool_manager, call, tool_call_results.get(call.tool_call_id)),
                 index,
             ):
                 yield event
@@ -921,7 +950,7 @@ async def _call_tools(
     else:
         tasks = [
             asyncio.create_task(
-                _call_tool(tool_manager, call, tool_call_results.get(call.tool_call_id), usage_limits),
+                _call_tool(tool_manager, call, tool_call_results.get(call.tool_call_id)),
                 name=call.tool_name,
             )
             for call in tool_calls
@@ -948,15 +977,14 @@ async def _call_tool(
     tool_manager: ToolManager[DepsT],
     tool_call: _messages.ToolCallPart,
     tool_call_result: DeferredToolResult | None,
-    usage_limits: _usage.UsageLimits | None,
-) -> tuple[_messages.ToolReturnPart | _messages.RetryPromptPart, _messages.UserPromptPart | None]:
+) -> tuple[_messages.ToolReturnPart | _messages.RetryPromptPart, str | Sequence[_messages.UserContent] | None]:
     try:
         if tool_call_result is None:
-            tool_result = await tool_manager.handle_call(tool_call, usage_limits=usage_limits)
+            tool_result = await tool_manager.handle_call(tool_call)
         elif isinstance(tool_call_result, ToolApproved):
             if tool_call_result.override_args is not None:
                 tool_call = dataclasses.replace(tool_call, args=tool_call_result.override_args)
-            tool_result = await tool_manager.handle_call(tool_call, usage_limits=usage_limits)
+            tool_result = await tool_manager.handle_call(tool_call)
         elif isinstance(tool_call_result, ToolDenied):
             return _messages.ToolReturnPart(
                 tool_name=tool_call.tool_name,
@@ -1026,14 +1054,7 @@ async def _call_tool(
             metadata=tool_return.metadata,
         )

-        user_part: _messages.UserPromptPart | None = None
-        if tool_return.content:
-            user_part = _messages.UserPromptPart(
-                content=tool_return.content,
-                part_kind='user-prompt',
-            )
-
-        return return_part, user_part
+        return return_part, tool_return.content or None


 @dataclasses.dataclass
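
`_call_tool` now hands back the raw `tool_return.content` and leaves wrapping it in a `UserPromptPart` to the caller, which is also what lets `FunctionToolResultEvent` carry the extra content. From the user's side this is the `ToolReturn` pattern; a hedged sketch (the tool body and attachment are illustrative):

    from pydantic_ai.messages import BinaryContent, ToolReturn

    def take_screenshot() -> ToolReturn:
        png = b'...'  # illustrative: capture the image bytes somehow
        return ToolReturn(
            return_value='screenshot taken',  # what the model sees as the tool result
            content=[BinaryContent(png, media_type='image/png')],  # sent as extra user-prompt content
        )
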
pydantic_ai/_cli.py (+4 -7)

@@ -18,7 +18,7 @@ from . import __version__
 from ._run_context import AgentDepsT
 from .agent import AbstractAgent, Agent
 from .exceptions import UserError
-from .messages import ModelMessage, TextPart
+from .messages import ModelMessage, ModelResponse
 from .models import KnownModelName, infer_model
 from .output import OutputDataT

@@ -351,14 +351,11 @@ def handle_slash_command(
         console.print('[dim]Exiting…[/dim]')
         return 0, multiline
     elif ident_prompt == '/cp':
-        try:
-            parts = messages[-1].parts
-        except IndexError:
+        if not messages or not isinstance(messages[-1], ModelResponse):
             console.print('[dim]No output available to copy.[/dim]')
         else:
-            text_to_copy = ''.join(part.content for part in parts if isinstance(part, TextPart))
-            text_to_copy = text_to_copy.strip()
-            if text_to_copy:
+            text_to_copy = messages[-1].text
+            if text_to_copy and (text_to_copy := text_to_copy.strip()):
                 pyperclip.copy(text_to_copy)
                 console.print('[dim]Copied last output to clipboard.[/dim]')
             else:
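
The `/cp` handler now leans on `ModelResponse.text`, presumably another of this release's `messages.py` conveniences: it appears to join the response's text parts and return `None` when there are none, which is what the `if text_to_copy and ...` guard implies. A hedged equivalence sketch, with `response` standing in for any `ModelResponse`:

    from pydantic_ai.messages import TextPart

    # Assumption: ModelResponse.text ~ concatenated TextPart contents, or None if there are none.
    text = ''.join(p.content for p in response.parts if isinstance(p, TextPart)) or None
    assert text == response.text  # expected behavior, per the /cp rewrite above
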