pydantic-ai-slim 1.0.14__tar.gz → 1.0.16__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (128)
  1. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/PKG-INFO +4 -4
  2. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/__init__.py +19 -1
  3. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/_agent_graph.py +129 -105
  4. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/_cli.py +7 -10
  5. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/_output.py +236 -192
  6. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/_parts_manager.py +8 -42
  7. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/_tool_manager.py +9 -16
  8. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/agent/__init__.py +18 -7
  9. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/agent/abstract.py +192 -23
  10. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/agent/wrapper.py +7 -4
  11. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/builtin_tools.py +82 -0
  12. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/direct.py +16 -9
  13. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/durable_exec/dbos/_agent.py +124 -18
  14. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/durable_exec/temporal/_agent.py +139 -19
  15. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/durable_exec/temporal/_model.py +8 -0
  16. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/format_prompt.py +9 -6
  17. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/mcp.py +20 -10
  18. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/messages.py +214 -44
  19. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/models/__init__.py +15 -1
  20. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/models/anthropic.py +27 -22
  21. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/models/cohere.py +4 -0
  22. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/models/function.py +7 -4
  23. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/models/gemini.py +8 -0
  24. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/models/google.py +56 -23
  25. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/models/groq.py +11 -5
  26. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/models/huggingface.py +5 -3
  27. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/models/mistral.py +6 -8
  28. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/models/openai.py +206 -58
  29. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/models/test.py +4 -0
  30. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/output.py +5 -2
  31. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/profiles/__init__.py +2 -0
  32. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/profiles/google.py +5 -2
  33. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/profiles/openai.py +2 -1
  34. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/result.py +51 -35
  35. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/run.py +35 -7
  36. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/usage.py +40 -5
  37. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pyproject.toml +1 -1
  38. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/.gitignore +0 -0
  39. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/LICENSE +0 -0
  40. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/README.md +0 -0
  41. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/__main__.py +0 -0
  42. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/_a2a.py +0 -0
  43. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/_function_schema.py +0 -0
  44. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/_griffe.py +0 -0
  45. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/_instrumentation.py +0 -0
  46. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/_json_schema.py +0 -0
  47. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/_mcp.py +0 -0
  48. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/_otel_messages.py +0 -0
  49. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/_run_context.py +0 -0
  50. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/_system_prompt.py +0 -0
  51. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/_thinking_part.py +0 -0
  52. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/_utils.py +0 -0
  53. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/ag_ui.py +0 -0
  54. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/common_tools/__init__.py +0 -0
  55. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  56. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/common_tools/tavily.py +0 -0
  57. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/durable_exec/__init__.py +0 -0
  58. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/durable_exec/dbos/__init__.py +0 -0
  59. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/durable_exec/dbos/_mcp_server.py +0 -0
  60. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/durable_exec/dbos/_model.py +0 -0
  61. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/durable_exec/dbos/_utils.py +0 -0
  62. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/durable_exec/temporal/__init__.py +0 -0
  63. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/durable_exec/temporal/_function_toolset.py +0 -0
  64. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/durable_exec/temporal/_logfire.py +0 -0
  65. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/durable_exec/temporal/_mcp_server.py +0 -0
  66. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/durable_exec/temporal/_run_context.py +0 -0
  67. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/durable_exec/temporal/_toolset.py +0 -0
  68. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/exceptions.py +0 -0
  69. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/ext/__init__.py +0 -0
  70. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/ext/aci.py +0 -0
  71. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/ext/langchain.py +0 -0
  72. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/models/bedrock.py +0 -0
  73. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/models/fallback.py +0 -0
  74. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/models/instrumented.py +0 -0
  75. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/models/mcp_sampling.py +0 -0
  76. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/models/wrapper.py +0 -0
  77. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/profiles/amazon.py +0 -0
  78. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/profiles/anthropic.py +0 -0
  79. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/profiles/cohere.py +0 -0
  80. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/profiles/deepseek.py +0 -0
  81. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/profiles/grok.py +0 -0
  82. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/profiles/groq.py +0 -0
  83. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/profiles/harmony.py +0 -0
  84. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/profiles/meta.py +0 -0
  85. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/profiles/mistral.py +0 -0
  86. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/profiles/moonshotai.py +0 -0
  87. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/profiles/qwen.py +0 -0
  88. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/__init__.py +0 -0
  89. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/anthropic.py +0 -0
  90. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/azure.py +0 -0
  91. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/bedrock.py +0 -0
  92. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/cerebras.py +0 -0
  93. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/cohere.py +0 -0
  94. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/deepseek.py +0 -0
  95. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/fireworks.py +0 -0
  96. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/gateway.py +0 -0
  97. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/github.py +0 -0
  98. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/google.py +0 -0
  99. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/google_gla.py +0 -0
  100. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/google_vertex.py +0 -0
  101. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/grok.py +0 -0
  102. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/groq.py +0 -0
  103. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/heroku.py +0 -0
  104. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/huggingface.py +0 -0
  105. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/litellm.py +0 -0
  106. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/mistral.py +0 -0
  107. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/moonshotai.py +0 -0
  108. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/ollama.py +0 -0
  109. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/openai.py +0 -0
  110. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/openrouter.py +0 -0
  111. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/together.py +0 -0
  112. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/providers/vercel.py +0 -0
  113. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/py.typed +0 -0
  114. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/retries.py +0 -0
  115. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/settings.py +0 -0
  116. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/tools.py +0 -0
  117. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/toolsets/__init__.py +0 -0
  118. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/toolsets/_dynamic.py +0 -0
  119. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/toolsets/abstract.py +0 -0
  120. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/toolsets/approval_required.py +0 -0
  121. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/toolsets/combined.py +0 -0
  122. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/toolsets/external.py +0 -0
  123. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/toolsets/filtered.py +0 -0
  124. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/toolsets/function.py +0 -0
  125. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/toolsets/prefixed.py +0 -0
  126. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/toolsets/prepared.py +0 -0
  127. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/toolsets/renamed.py +0 -0
  128. {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/toolsets/wrapper.py +0 -0
{pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 1.0.14
+Version: 1.0.16
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -29,11 +29,11 @@ Classifier: Topic :: Internet
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.10
 Requires-Dist: exceptiongroup; python_version < '3.11'
-Requires-Dist: genai-prices>=0.0.28
+Requires-Dist: genai-prices>=0.0.30
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==1.0.14
+Requires-Dist: pydantic-graph==1.0.16
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -57,7 +57,7 @@ Requires-Dist: dbos>=1.14.0; extra == 'dbos'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==1.0.14; extra == 'evals'
+Requires-Dist: pydantic-evals==1.0.16; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq
{pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/__init__.py

@@ -9,7 +9,14 @@ from .agent import (
     UserPromptNode,
     capture_run_messages,
 )
-from .builtin_tools import CodeExecutionTool, UrlContextTool, WebSearchTool, WebSearchUserLocation
+from .builtin_tools import (
+    CodeExecutionTool,
+    ImageGenerationTool,
+    MemoryTool,
+    UrlContextTool,
+    WebSearchTool,
+    WebSearchUserLocation,
+)
 from .exceptions import (
     AgentRunError,
     ApprovalRequired,
@@ -30,11 +37,13 @@ from .messages import (
     BaseToolCallPart,
     BaseToolReturnPart,
     BinaryContent,
+    BinaryImage,
     BuiltinToolCallPart,
     BuiltinToolReturnPart,
     DocumentFormat,
     DocumentMediaType,
     DocumentUrl,
+    FilePart,
     FileUrl,
     FinalResultEvent,
     FinishReason,
@@ -79,6 +88,7 @@ from .profiles import (
     ModelProfile,
     ModelProfileSpec,
 )
+from .run import AgentRun, AgentRunResult, AgentRunResultEvent
 from .settings import ModelSettings
 from .tools import DeferredToolRequests, DeferredToolResults, RunContext, Tool, ToolApproved, ToolDefinition, ToolDenied
 from .toolsets import (
@@ -131,6 +141,7 @@ __all__ = (
     'DocumentMediaType',
     'DocumentUrl',
     'FileUrl',
+    'FilePart',
     'FinalResultEvent',
     'FinishReason',
     'FunctionToolCallEvent',
@@ -139,6 +150,7 @@ __all__ = (
     'ImageFormat',
     'ImageMediaType',
     'ImageUrl',
+    'BinaryImage',
     'ModelMessage',
     'ModelMessagesTypeAdapter',
     'ModelRequest',
@@ -197,6 +209,8 @@ __all__ = (
     'WebSearchUserLocation',
     'UrlContextTool',
     'CodeExecutionTool',
+    'ImageGenerationTool',
+    'MemoryTool',
     # output
     'ToolOutput',
     'NativeOutput',
@@ -211,5 +225,9 @@ __all__ = (
     'RunUsage',
     'RequestUsage',
     'UsageLimits',
+    # run
+    'AgentRun',
+    'AgentRunResult',
+    'AgentRunResultEvent',
 )
 __version__ = _metadata_version('pydantic_ai_slim')
{pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/_agent_graph.py

@@ -87,10 +87,10 @@ Can optionally accept a `RunContext` as a parameter.
 class GraphAgentState:
     """State kept across the execution of the agent graph."""

-    message_history: list[_messages.ModelMessage]
-    usage: _usage.RunUsage
-    retries: int
-    run_step: int
+    message_history: list[_messages.ModelMessage] = dataclasses.field(default_factory=list)
+    usage: _usage.RunUsage = dataclasses.field(default_factory=_usage.RunUsage)
+    retries: int = 0
+    run_step: int = 0

     def increment_retries(self, max_result_retries: int, error: BaseException | None = None) -> None:
         self.retries += 1
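With defaults on every field, `GraphAgentState()` can now be constructed empty. The mutable fields use `dataclasses.field(default_factory=...)` because Python dataclasses reject shared mutable defaults; a standalone illustration of the pattern:

```python
from dataclasses import dataclass, field

@dataclass
class State:
    # A literal default like `history: list = []` raises ValueError at class
    # definition time: one list object would be shared by every instance.
    history: list[str] = field(default_factory=list)
    retries: int = 0  # immutable defaults can be written directly

a, b = State(), State()
a.history.append('x')
assert b.history == []  # each instance gets its own fresh list
```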
@@ -222,7 +222,7 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
         if self.user_prompt is None:
             # Skip ModelRequestNode and go directly to CallToolsNode
             return CallToolsNode[DepsT, NodeRunEndT](last_message)
-        elif any(isinstance(part, _messages.ToolCallPart) for part in last_message.parts):
+        elif last_message.tool_calls:
             raise exceptions.UserError(
                 'Cannot provide a new user prompt when the message history contains unprocessed tool calls.'
             )
@@ -230,7 +230,6 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
         # Build the run context after `ctx.deps.prompt` has been updated
         run_context = build_run_context(ctx)

-        parts: list[_messages.ModelRequestPart] = []
         if messages:
             await self._reevaluate_dynamic_prompts(messages, run_context)

@@ -272,7 +271,7 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
             raise exceptions.UserError(
                 'Tool call results were provided, but the message history does not contain a `ModelResponse`.'
             )
-        if not any(isinstance(part, _messages.ToolCallPart) for part in last_model_response.parts):
+        if not last_model_response.tool_calls:
             raise exceptions.UserError(
                 'Tool call results were provided, but the message history does not contain any unprocessed tool calls.'
             )
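Both `any(isinstance(...))` scans are replaced by a `tool_calls` accessor on the message, which this release adds in `pydantic_ai/messages.py` (see the +214 -44 entry in the file list). A minimal sketch of the pattern, assuming it is a property filtering `parts`:

```python
from dataclasses import dataclass, field

@dataclass
class TextPart:
    content: str

@dataclass
class ToolCallPart:
    tool_name: str

@dataclass
class ModelResponse:
    parts: list[TextPart | ToolCallPart] = field(default_factory=list)

    @property
    def tool_calls(self) -> list[ToolCallPart]:
        # One plausible spelling of the accessor the diff relies on.
        return [p for p in self.parts if isinstance(p, ToolCallPart)]

resp = ModelResponse(parts=[TextPart('hi'), ToolCallPart('get_weather')])
assert [c.tool_name for c in resp.tool_calls] == ['get_weather']
```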
@@ -356,9 +355,6 @@ async def _prepare_request_parameters(
     if isinstance(output_schema, _output.NativeOutputSchema):
         output_object = output_schema.object_def

-    # ToolOrTextOutputSchema, NativeOutputSchema, and PromptedOutputSchema all inherit from TextOutputSchema
-    allow_text_output = isinstance(output_schema, _output.TextOutputSchema)
-
     function_tools: list[ToolDefinition] = []
     output_tools: list[ToolDefinition] = []
     for tool_def in ctx.deps.tool_manager.tool_defs:
@@ -373,7 +369,8 @@
         output_mode=output_schema.mode,
         output_tools=output_tools,
         output_object=output_object,
-        allow_text_output=allow_text_output,
+        allow_text_output=output_schema.allows_text,
+        allow_image_output=output_schema.allows_image,
     )

@@ -543,27 +540,58 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
         if self._events_iterator is None:
             # Ensure that the stream is only run once

+            output_schema = ctx.deps.output_schema
+
             async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]:  # noqa: C901
+                if not self.model_response.parts:
+                    # we got an empty response.
+                    # this sometimes happens with anthropic (and perhaps other models)
+                    # when the model has already returned text along side tool calls
+                    if text_processor := output_schema.text_processor:
+                        # in this scenario, if text responses are allowed, we return text from the most recent model
+                        # response, if any
+                        for message in reversed(ctx.state.message_history):
+                            if isinstance(message, _messages.ModelResponse):
+                                text = ''
+                                for part in message.parts:
+                                    if isinstance(part, _messages.TextPart):
+                                        text += part.content
+                                    elif isinstance(part, _messages.BuiltinToolCallPart):
+                                        # Text parts before a built-in tool call are essentially thoughts,
+                                        # not part of the final result output, so we reset the accumulated text
+                                        text = ''  # pragma: no cover
+                                if text:
+                                    self._next_node = await self._handle_text_response(ctx, text, text_processor)
+                                    return
+
+                    # Go back to the model request node with an empty request, which means we'll essentially
+                    # resubmit the most recent request that resulted in an empty response,
+                    # as the empty response and request will not create any items in the API payload,
+                    # in the hope the model will return a non-empty response this time.
+                    ctx.state.increment_retries(ctx.deps.max_result_retries)
+                    self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[]))
+                    return
+
                 text = ''
                 tool_calls: list[_messages.ToolCallPart] = []
-                invisible_parts: bool = False
+                files: list[_messages.BinaryContent] = []

                 for part in self.model_response.parts:
                     if isinstance(part, _messages.TextPart):
                         text += part.content
                     elif isinstance(part, _messages.ToolCallPart):
                         tool_calls.append(part)
+                    elif isinstance(part, _messages.FilePart):
+                        files.append(part.content)
                     elif isinstance(part, _messages.BuiltinToolCallPart):
                         # Text parts before a built-in tool call are essentially thoughts,
                         # not part of the final result output, so we reset the accumulated text
                         text = ''
-                        invisible_parts = True
                         yield _messages.BuiltinToolCallEvent(part)  # pyright: ignore[reportDeprecated]
                     elif isinstance(part, _messages.BuiltinToolReturnPart):
-                        invisible_parts = True
                         yield _messages.BuiltinToolResultEvent(part)  # pyright: ignore[reportDeprecated]
                     elif isinstance(part, _messages.ThinkingPart):
-                        invisible_parts = True
+                        pass
                     else:
                         assert_never(part)

@@ -572,47 +600,35 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
                 # This accounts for cases like anthropic returns that might contain a text response
                 # and a tool call response, where the text response just indicates the tool call will happen.
                 try:
+                    alternatives: list[str] = []
                     if tool_calls:
                         async for event in self._handle_tool_calls(ctx, tool_calls):
                             yield event
-                    elif text:
-                        # No events are emitted during the handling of text responses, so we don't need to yield anything
-                        self._next_node = await self._handle_text_response(ctx, text)
-                    elif invisible_parts:
-                        # handle responses with only thinking or built-in tool parts.
-                        # this can happen with models that support thinking mode when they don't provide
-                        # actionable output alongside their thinking content. so we tell the model to try again.
-                        m = _messages.RetryPromptPart(
-                            content='Responses without text or tool calls are not permitted.',
-                        )
-                        raise ToolRetryError(m)
+                        return
+                    elif output_schema.toolset:
+                        alternatives.append('include your response in a tool call')
                     else:
-                        # we got an empty response with no tool calls, text, thinking, or built-in tool calls.
-                        # this sometimes happens with anthropic (and perhaps other models)
-                        # when the model has already returned text along side tool calls
-                        # in this scenario, if text responses are allowed, we return text from the most recent model
-                        # response, if any
-                        if isinstance(ctx.deps.output_schema, _output.TextOutputSchema):
-                            for message in reversed(ctx.state.message_history):
-                                if isinstance(message, _messages.ModelResponse):
-                                    text = ''
-                                    for part in message.parts:
-                                        if isinstance(part, _messages.TextPart):
-                                            text += part.content
-                                        elif isinstance(part, _messages.BuiltinToolCallPart):
-                                            # Text parts before a built-in tool call are essentially thoughts,
-                                            # not part of the final result output, so we reset the accumulated text
-                                            text = ''  # pragma: no cover
-                                    if text:
-                                        self._next_node = await self._handle_text_response(ctx, text)
-                                        return
-
-                        # Go back to the model request node with an empty request, which means we'll essentially
-                        # resubmit the most recent request that resulted in an empty response,
-                        # as the empty response and request will not create any items in the API payload,
-                        # in the hope the model will return a non-empty response this time.
-                        ctx.state.increment_retries(ctx.deps.max_result_retries)
-                        self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[]))
+                        alternatives.append('call a tool')
+
+                    if output_schema.allows_image:
+                        if image := next((file for file in files if isinstance(file, _messages.BinaryImage)), None):
+                            self._next_node = await self._handle_image_response(ctx, image)
+                            return
+                        alternatives.append('return an image')
+
+                    if text_processor := output_schema.text_processor:
+                        if text:
+                            self._next_node = await self._handle_text_response(ctx, text, text_processor)
+                            return
+                        alternatives.insert(0, 'return text')
+
+                    # handle responses with only parts that don't constitute output.
+                    # This can happen with models that support thinking mode when they don't provide
+                    # actionable output alongside their thinking content. so we tell the model to try again.
+                    m = _messages.RetryPromptPart(
+                        content=f'Please {" or ".join(alternatives)}.',
+                    )
+                    raise ToolRetryError(m)
                 except ToolRetryError as e:
                     ctx.state.increment_retries(ctx.deps.max_result_retries, e)
                     self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[e.tool_retry]))
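The fixed retry message is replaced by one assembled from the output modes the schema actually supports, so the model gets actionable guidance. For a plain text-output agent that also has function tools registered, the `alternatives` list built above yields:

```python
# What the RetryPromptPart content looks like in that case:
alternatives = ['return text', 'call a tool']
print(f'Please {" or ".join(alternatives)}.')
# -> Please return text or call a tool.
```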
@@ -655,6 +671,28 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
             _messages.ModelRequest(parts=output_parts, instructions=instructions)
         )

+    async def _handle_text_response(
+        self,
+        ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
+        text: str,
+        text_processor: _output.BaseOutputProcessor[NodeRunEndT],
+    ) -> ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]]:
+        run_context = build_run_context(ctx)
+
+        result_data = await text_processor.process(text, run_context)
+
+        for validator in ctx.deps.output_validators:
+            result_data = await validator.validate(result_data, run_context)
+        return self._handle_final_result(ctx, result.FinalResult(result_data), [])
+
+    async def _handle_image_response(
+        self,
+        ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
+        image: _messages.BinaryImage,
+    ) -> ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]]:
+        result_data = cast(NodeRunEndT, image)
+        return self._handle_final_result(ctx, result.FinalResult(result_data), [])
+
     def _handle_final_result(
         self,
         ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
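The new `_handle_image_response` path is what allows a run to end with an image as its final output. A hedged usage sketch, assuming `output_type=BinaryImage` plus `ImageGenerationTool` work together the way this diff suggests; the model name is a placeholder for one with image generation support:

```python
from pydantic_ai import Agent, BinaryImage, ImageGenerationTool

# Sketch only: pick a model you have access to that supports image generation.
agent = Agent(
    'openai-responses:some-image-capable-model',  # placeholder model name
    output_type=BinaryImage,
    builtin_tools=[ImageGenerationTool()],
)

result = agent.run_sync('Generate a simple logo sketch for a weather app.')
image = result.output  # BinaryImage: raw bytes plus a media type
print(image.media_type, len(image.data))
```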
@@ -669,26 +707,6 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):

         return End(final_result)

-    async def _handle_text_response(
-        self,
-        ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
-        text: str,
-    ) -> ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]]:
-        output_schema = ctx.deps.output_schema
-        run_context = build_run_context(ctx)
-
-        if isinstance(output_schema, _output.TextOutputSchema):
-            result_data = await output_schema.process(text, run_context)
-        else:
-            m = _messages.RetryPromptPart(
-                content='Plain text responses are not permitted, please include your response in a tool call',
-            )
-            raise ToolRetryError(m)
-
-        for validator in ctx.deps.output_validators:
-            result_data = await validator.validate(result_data, run_context)
-        return self._handle_final_result(ctx, result.FinalResult(result_data), [])
-
     __repr__ = dataclasses_no_defaults_repr

@@ -777,16 +795,14 @@ async def process_tool_calls(  # noqa: C901
     # Then, we handle function tool calls
     calls_to_run: list[_messages.ToolCallPart] = []
     if final_result and ctx.deps.end_strategy == 'early':
-        output_parts.extend(
-            [
+        for call in tool_calls_by_kind['function']:
+            output_parts.append(
                 _messages.ToolReturnPart(
                     tool_name=call.tool_name,
                     content='Tool not executed - a final result was already processed.',
                     tool_call_id=call.tool_call_id,
                 )
-                for call in tool_calls_by_kind['function']
-            ]
-        )
+            )
     else:
         calls_to_run.extend(tool_calls_by_kind['function'])
@@ -821,6 +837,7 @@ async def process_tool_calls(  # noqa: C901
            tool_calls=calls_to_run,
            tool_call_results=calls_to_run_results,
            tracer=ctx.deps.tracer,
+           usage=ctx.state.usage,
            usage_limits=ctx.deps.usage_limits,
            output_parts=output_parts,
            output_deferred_calls=deferred_calls,
@@ -831,14 +848,17 @@ async def process_tool_calls(  # noqa: C901
     if tool_call_results is None:
         calls = [*tool_calls_by_kind['external'], *tool_calls_by_kind['unapproved']]
         if final_result:
-            for call in calls:
-                output_parts.append(
-                    _messages.ToolReturnPart(
-                        tool_name=call.tool_name,
-                        content='Tool not executed - a final result was already processed.',
-                        tool_call_id=call.tool_call_id,
+            # If the run was already determined to end on deferred tool calls,
+            # we shouldn't insert return parts as the deferred tools will still get a real result.
+            if not isinstance(final_result.output, _output.DeferredToolRequests):
+                for call in calls:
+                    output_parts.append(
+                        _messages.ToolReturnPart(
+                            tool_name=call.tool_name,
+                            content='Tool not executed - a final result was already processed.',
+                            tool_call_id=call.tool_call_id,
+                        )
                     )
-                )
         elif calls:
             deferred_calls['external'].extend(tool_calls_by_kind['external'])
             deferred_calls['unapproved'].extend(tool_calls_by_kind['unapproved'])
@@ -867,7 +887,8 @@ async def _call_tools(
     tool_calls: list[_messages.ToolCallPart],
     tool_call_results: dict[str, DeferredToolResult],
     tracer: Tracer,
-    usage_limits: _usage.UsageLimits | None,
+    usage: _usage.RunUsage,
+    usage_limits: _usage.UsageLimits,
     output_parts: list[_messages.ModelRequestPart],
     output_deferred_calls: dict[Literal['external', 'unapproved'], list[_messages.ToolCallPart]],
 ) -> AsyncIterator[_messages.HandleResponseEvent]:
@@ -875,6 +896,11 @@ async def _call_tools(
     user_parts_by_index: dict[int, _messages.UserPromptPart] = {}
     deferred_calls_by_index: dict[int, Literal['external', 'unapproved']] = {}

+    if usage_limits.tool_calls_limit is not None:
+        projected_usage = deepcopy(usage)
+        projected_usage.tool_calls += len(tool_calls)
+        usage_limits.check_before_tool_call(projected_usage)
+
     for call in tool_calls:
         yield _messages.FunctionToolCallEvent(call)

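Copying the usage and adding `len(tool_calls)` up front means a batch of parallel tool calls is checked against `tool_calls_limit` before any of them executes, instead of failing partway through the batch. A hedged sketch of the limit from the caller's side, assuming `check_before_tool_call` raises `UsageLimitExceeded` like the other limit checks:

```python
from pydantic_ai import Agent
from pydantic_ai.exceptions import UsageLimitExceeded
from pydantic_ai.usage import UsageLimits

agent = Agent('test')  # built-in test model, no API key required

@agent.tool_plain
def lookup() -> str:
    return 'ok'

try:
    # With a limit of 0, the pre-flight check above trips before the
    # first tool call runs.
    agent.run_sync('call the tool', usage_limits=UsageLimits(tool_calls_limit=0))
except UsageLimitExceeded as e:
    print(e)
```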
@@ -888,13 +914,19 @@ async def _call_tools(

     async def handle_call_or_result(
         coro_or_task: Awaitable[
-            tuple[_messages.ToolReturnPart | _messages.RetryPromptPart, _messages.UserPromptPart | None]
+            tuple[
+                _messages.ToolReturnPart | _messages.RetryPromptPart, str | Sequence[_messages.UserContent] | None
+            ]
         ]
-        | Task[tuple[_messages.ToolReturnPart | _messages.RetryPromptPart, _messages.UserPromptPart | None]],
+        | Task[
+            tuple[
+                _messages.ToolReturnPart | _messages.RetryPromptPart, str | Sequence[_messages.UserContent] | None
+            ]
+        ],
         index: int,
     ) -> _messages.HandleResponseEvent | None:
         try:
-            tool_part, tool_user_part = (
+            tool_part, tool_user_content = (
                 (await coro_or_task) if inspect.isawaitable(coro_or_task) else coro_or_task.result()
             )
         except exceptions.CallDeferred:
@@ -903,15 +935,15 @@ async def _call_tools(
             deferred_calls_by_index[index] = 'unapproved'
         else:
             tool_parts_by_index[index] = tool_part
-            if tool_user_part:
-                user_parts_by_index[index] = tool_user_part
+            if tool_user_content:
+                user_parts_by_index[index] = _messages.UserPromptPart(content=tool_user_content)

-        return _messages.FunctionToolResultEvent(tool_part)
+        return _messages.FunctionToolResultEvent(tool_part, content=tool_user_content)

     if tool_manager.should_call_sequentially(tool_calls):
         for index, call in enumerate(tool_calls):
             if event := await handle_call_or_result(
-                _call_tool(tool_manager, call, tool_call_results.get(call.tool_call_id), usage_limits),
+                _call_tool(tool_manager, call, tool_call_results.get(call.tool_call_id)),
                 index,
             ):
                 yield event
@@ -919,7 +951,7 @@ async def _call_tools(
     else:
         tasks = [
             asyncio.create_task(
-                _call_tool(tool_manager, call, tool_call_results.get(call.tool_call_id), usage_limits),
+                _call_tool(tool_manager, call, tool_call_results.get(call.tool_call_id)),
                 name=call.tool_name,
             )
             for call in tool_calls
@@ -946,15 +978,14 @@ async def _call_tool(
     tool_manager: ToolManager[DepsT],
     tool_call: _messages.ToolCallPart,
     tool_call_result: DeferredToolResult | None,
-    usage_limits: _usage.UsageLimits | None,
-) -> tuple[_messages.ToolReturnPart | _messages.RetryPromptPart, _messages.UserPromptPart | None]:
+) -> tuple[_messages.ToolReturnPart | _messages.RetryPromptPart, str | Sequence[_messages.UserContent] | None]:
     try:
         if tool_call_result is None:
-            tool_result = await tool_manager.handle_call(tool_call, usage_limits=usage_limits)
+            tool_result = await tool_manager.handle_call(tool_call)
         elif isinstance(tool_call_result, ToolApproved):
             if tool_call_result.override_args is not None:
                 tool_call = dataclasses.replace(tool_call, args=tool_call_result.override_args)
-            tool_result = await tool_manager.handle_call(tool_call, usage_limits=usage_limits)
+            tool_result = await tool_manager.handle_call(tool_call)
         elif isinstance(tool_call_result, ToolDenied):
             return _messages.ToolReturnPart(
                 tool_name=tool_call.tool_name,
@@ -1024,14 +1055,7 @@ async def _call_tool(
         metadata=tool_return.metadata,
     )

-    user_part: _messages.UserPromptPart | None = None
-    if tool_return.content:
-        user_part = _messages.UserPromptPart(
-            content=tool_return.content,
-            part_kind='user-prompt',
-        )
-
-    return return_part, user_part
+    return return_part, tool_return.content or None


 @dataclasses.dataclass
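`ToolReturn.content` is now passed through raw and only wrapped into a `UserPromptPart` back in `_call_tools`, which also lets `FunctionToolResultEvent` carry it. From the tool author's side nothing changes; a hedged sketch of the feature, assuming `ToolReturn` keeps its `return_value`/`content` split:

```python
from pydantic_ai import Agent
from pydantic_ai.messages import ToolReturn

agent = Agent('test')

@agent.tool_plain
def take_screenshot() -> ToolReturn:
    return ToolReturn(
        # What the tool call formally returns to the model:
        return_value='screenshot taken',
        # Extra context attached to the next request as a user part
        # (str | Sequence[UserContent], so it may include images):
        content='The screenshot shows a login form with two fields.',
    )
```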
{pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.16}/pydantic_ai/_cli.py

@@ -18,7 +18,7 @@ from . import __version__
 from ._run_context import AgentDepsT
 from .agent import AbstractAgent, Agent
 from .exceptions import UserError
-from .messages import ModelMessage, TextPart
+from .messages import ModelMessage, ModelResponse
 from .models import KnownModelName, infer_model
 from .output import OutputDataT

@@ -228,7 +228,7 @@ async def run_chat(
     prog_name: str,
     config_dir: Path | None = None,
     deps: AgentDepsT = None,
-    message_history: list[ModelMessage] | None = None,
+    message_history: Sequence[ModelMessage] | None = None,
 ) -> int:
     prompt_history_path = (config_dir or PYDANTIC_AI_HOME) / PROMPT_HISTORY_FILENAME
     prompt_history_path.parent.mkdir(parents=True, exist_ok=True)
@@ -236,7 +236,7 @@ async def run_chat(
     session: PromptSession[Any] = PromptSession(history=FileHistory(str(prompt_history_path)))

     multiline = False
-    messages: list[ModelMessage] = message_history[:] if message_history else []
+    messages: list[ModelMessage] = list(message_history) if message_history else []

     while True:
         try:
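Widening `message_history` to `Sequence[ModelMessage]` is why the copy switches from `[:]` to `list(...)`: slicing preserves the concrete sequence type (a tuple stays a tuple), so only `list(...)` guarantees the `list[ModelMessage]` annotation. A standalone illustration:

```python
from typing import Sequence

def copy_messages(history: Sequence[str] | None) -> list[str]:
    # `history[:]` on a tuple returns a tuple, breaking the list annotation;
    # `list(history)` always produces a fresh list.
    return list(history) if history else []

assert copy_messages(('a', 'b')) == ['a', 'b']  # tuples work now
assert copy_messages(['a']) == ['a']
assert copy_messages(None) == []
```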
@@ -272,7 +272,7 @@ async def ask_agent(
     console: Console,
     code_theme: str,
     deps: AgentDepsT = None,
-    messages: list[ModelMessage] | None = None,
+    messages: Sequence[ModelMessage] | None = None,
 ) -> list[ModelMessage]:
     status = Status('[dim]Working on it…[/dim]', console=console)

@@ -351,14 +351,11 @@ def handle_slash_command(
         console.print('[dim]Exiting…[/dim]')
         return 0, multiline
     elif ident_prompt == '/cp':
-        try:
-            parts = messages[-1].parts
-        except IndexError:
+        if not messages or not isinstance(messages[-1], ModelResponse):
             console.print('[dim]No output available to copy.[/dim]')
         else:
-            text_to_copy = ''.join(part.content for part in parts if isinstance(part, TextPart))
-            text_to_copy = text_to_copy.strip()
-            if text_to_copy:
+            text_to_copy = messages[-1].text
+            if text_to_copy and (text_to_copy := text_to_copy.strip()):
                 pyperclip.copy(text_to_copy)
                 console.print('[dim]Copied last output to clipboard.[/dim]')
             else:
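The `/cp` command now leans on a `text` accessor on `ModelResponse` (added in `pydantic_ai/messages.py` in this release) rather than concatenating `TextPart`s by hand, and the `isinstance` guard replaces the old `try/except IndexError`. A minimal sketch of what such an accessor plausibly does:

```python
# Hedged sketch of the `ModelResponse.text` accessor the CLI now uses;
# the real implementation lives in pydantic_ai/messages.py.
from dataclasses import dataclass

@dataclass
class TextPart:
    content: str

@dataclass
class ModelResponse:
    parts: list[TextPart]

    @property
    def text(self) -> str | None:
        texts = [p.content for p in self.parts if isinstance(p, TextPart)]
        return ''.join(texts) if texts else None

assert ModelResponse([TextPart('a'), TextPart('b')]).text == 'ab'
assert ModelResponse([]).text is None
```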