pydantic-ai-slim 1.0.14__tar.gz → 1.0.15__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/PKG-INFO +3 -3
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/__init__.py +19 -1
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/_agent_graph.py +116 -93
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/_cli.py +4 -7
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/_output.py +236 -192
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/_parts_manager.py +8 -42
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/_tool_manager.py +9 -16
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/agent/abstract.py +169 -1
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/builtin_tools.py +82 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/direct.py +7 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/dbos/_agent.py +106 -3
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/temporal/_agent.py +123 -6
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/temporal/_model.py +8 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/format_prompt.py +4 -3
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/mcp.py +20 -10
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/messages.py +149 -3
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/__init__.py +15 -1
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/anthropic.py +7 -3
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/cohere.py +4 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/function.py +7 -4
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/gemini.py +8 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/google.py +56 -23
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/groq.py +11 -5
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/huggingface.py +5 -3
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/mistral.py +6 -8
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/openai.py +197 -57
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/test.py +4 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/output.py +5 -2
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/__init__.py +2 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/google.py +5 -2
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/openai.py +2 -1
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/result.py +46 -30
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/run.py +35 -7
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/usage.py +5 -4
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/.gitignore +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/LICENSE +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/README.md +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/__main__.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/_a2a.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/_function_schema.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/_instrumentation.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/_json_schema.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/_mcp.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/_otel_messages.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/_run_context.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/_thinking_part.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/_utils.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/ag_ui.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/agent/__init__.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/agent/wrapper.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/__init__.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/dbos/__init__.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/dbos/_mcp_server.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/dbos/_model.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/dbos/_utils.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/temporal/__init__.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/temporal/_function_toolset.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/temporal/_logfire.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/temporal/_mcp_server.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/temporal/_run_context.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/durable_exec/temporal/_toolset.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/ext/__init__.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/ext/aci.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/ext/langchain.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/bedrock.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/fallback.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/instrumented.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/mcp_sampling.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/amazon.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/anthropic.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/cohere.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/deepseek.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/grok.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/groq.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/harmony.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/meta.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/mistral.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/moonshotai.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/profiles/qwen.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/__init__.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/anthropic.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/azure.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/cerebras.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/cohere.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/fireworks.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/gateway.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/github.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/google.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/grok.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/groq.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/heroku.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/huggingface.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/litellm.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/mistral.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/moonshotai.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/ollama.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/openai.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/openrouter.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/together.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/providers/vercel.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/retries.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/tools.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/__init__.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/_dynamic.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/abstract.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/approval_required.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/combined.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/external.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/filtered.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/function.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/prefixed.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/prepared.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/renamed.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/toolsets/wrapper.py +0 -0
- {pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pyproject.toml +0 -0
{pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 1.0.14
+Version: 1.0.15
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -33,7 +33,7 @@ Requires-Dist: genai-prices>=0.0.28
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==1.0.14
+Requires-Dist: pydantic-graph==1.0.15
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -57,7 +57,7 @@ Requires-Dist: dbos>=1.14.0; extra == 'dbos'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==1.0.14; extra == 'evals'
+Requires-Dist: pydantic-evals==1.0.15; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq
{pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/__init__.py
@@ -9,7 +9,14 @@ from .agent import (
     UserPromptNode,
     capture_run_messages,
 )
-from .builtin_tools import CodeExecutionTool, UrlContextTool, WebSearchTool, WebSearchUserLocation
+from .builtin_tools import (
+    CodeExecutionTool,
+    ImageGenerationTool,
+    MemoryTool,
+    UrlContextTool,
+    WebSearchTool,
+    WebSearchUserLocation,
+)
 from .exceptions import (
     AgentRunError,
     ApprovalRequired,
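The single-line import was expanded because this release exports two additional built-in tools, ImageGenerationTool and MemoryTool (see the builtin_tools.py +82 entry in the file list). A minimal sketch of using the newly exported names, assuming a model that supports built-in tools; the model name is a placeholder:

# Sketch only: attaching built-in tools to an agent. 'openai:gpt-5' is a
# placeholder model name; ImageGenerationTool and MemoryTool are the two
# names this release adds to the package root.
from pydantic_ai import Agent, ImageGenerationTool, WebSearchTool

agent = Agent(
    'openai:gpt-5',
    builtin_tools=[WebSearchTool(), ImageGenerationTool()],
)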
@@ -30,11 +37,13 @@ from .messages import (
     BaseToolCallPart,
     BaseToolReturnPart,
     BinaryContent,
+    BinaryImage,
     BuiltinToolCallPart,
     BuiltinToolReturnPart,
     DocumentFormat,
     DocumentMediaType,
     DocumentUrl,
+    FilePart,
     FileUrl,
     FinalResultEvent,
     FinishReason,
@@ -79,6 +88,7 @@ from .profiles import (
     ModelProfile,
     ModelProfileSpec,
 )
+from .run import AgentRun, AgentRunResult, AgentRunResultEvent
 from .settings import ModelSettings
 from .tools import DeferredToolRequests, DeferredToolResults, RunContext, Tool, ToolApproved, ToolDefinition, ToolDenied
 from .toolsets import (
@@ -131,6 +141,7 @@ __all__ = (
     'DocumentMediaType',
     'DocumentUrl',
     'FileUrl',
+    'FilePart',
     'FinalResultEvent',
     'FinishReason',
     'FunctionToolCallEvent',
@@ -139,6 +150,7 @@ __all__ = (
     'ImageFormat',
     'ImageMediaType',
     'ImageUrl',
+    'BinaryImage',
     'ModelMessage',
     'ModelMessagesTypeAdapter',
     'ModelRequest',
@@ -197,6 +209,8 @@ __all__ = (
     'WebSearchUserLocation',
     'UrlContextTool',
     'CodeExecutionTool',
+    'ImageGenerationTool',
+    'MemoryTool',
     # output
     'ToolOutput',
     'NativeOutput',
@@ -211,5 +225,9 @@ __all__ = (
     'RunUsage',
     'RequestUsage',
     'UsageLimits',
+    # run
+    'AgentRun',
+    'AgentRunResult',
+    'AgentRunResultEvent',
 )
 __version__ = _metadata_version('pydantic_ai_slim')
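Taken together, the __init__.py hunks re-export the run and file types at the package root. A short sketch of the imports this enables (previously these names had to come from the pydantic_ai.run and pydantic_ai.messages submodules):

# Sketch: names newly importable from the package root as of 1.0.15.
from pydantic_ai import AgentRun, AgentRunResult, AgentRunResultEvent
from pydantic_ai import BinaryImage, FilePart, ImageGenerationTool, MemoryTool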
{pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/_agent_graph.py
@@ -87,10 +87,10 @@ Can optionally accept a `RunContext` as a parameter.
 class GraphAgentState:
     """State kept across the execution of the agent graph."""

-    message_history: list[_messages.ModelMessage]
-    usage: _usage.RunUsage
-    retries: int
-    run_step: int
+    message_history: list[_messages.ModelMessage] = dataclasses.field(default_factory=list)
+    usage: _usage.RunUsage = dataclasses.field(default_factory=_usage.RunUsage)
+    retries: int = 0
+    run_step: int = 0

     def increment_retries(self, max_result_retries: int, error: BaseException | None = None) -> None:
         self.retries += 1
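With defaults on every field, GraphAgentState() becomes constructible without arguments. The same pattern in a self-contained sketch (a hypothetical dataclass, not the library's):

import dataclasses

@dataclasses.dataclass
class State:
    # Mutable containers need default_factory; ints take plain defaults.
    message_history: list[str] = dataclasses.field(default_factory=list)
    retries: int = 0
    run_step: int = 0

state = State()  # now valid with no arguments
assert state.message_history == [] and state.run_step == 0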
@@ -222,7 +222,7 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
         if self.user_prompt is None:
             # Skip ModelRequestNode and go directly to CallToolsNode
             return CallToolsNode[DepsT, NodeRunEndT](last_message)
-        elif
+        elif last_message.tool_calls:
             raise exceptions.UserError(
                 'Cannot provide a new user prompt when the message history contains unprocessed tool calls.'
             )
@@ -230,7 +230,6 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
         # Build the run context after `ctx.deps.prompt` has been updated
         run_context = build_run_context(ctx)

-        parts: list[_messages.ModelRequestPart] = []
         if messages:
             await self._reevaluate_dynamic_prompts(messages, run_context)
@@ -272,7 +271,7 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
             raise exceptions.UserError(
                 'Tool call results were provided, but the message history does not contain a `ModelResponse`.'
             )
-        if not
+        if not last_model_response.tool_calls:
             raise exceptions.UserError(
                 'Tool call results were provided, but the message history does not contain any unprocessed tool calls.'
             )
@@ -356,9 +355,6 @@ async def _prepare_request_parameters(
     if isinstance(output_schema, _output.NativeOutputSchema):
         output_object = output_schema.object_def

-    # ToolOrTextOutputSchema, NativeOutputSchema, and PromptedOutputSchema all inherit from TextOutputSchema
-    allow_text_output = isinstance(output_schema, _output.TextOutputSchema)
-
     function_tools: list[ToolDefinition] = []
     output_tools: list[ToolDefinition] = []
     for tool_def in ctx.deps.tool_manager.tool_defs:
@@ -373,7 +369,8 @@ async def _prepare_request_parameters(
         output_mode=output_schema.mode,
         output_tools=output_tools,
         output_object=output_object,
-        allow_text_output=allow_text_output,
+        allow_text_output=output_schema.allows_text,
+        allow_image_output=output_schema.allows_image,
     )
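Instead of re-deriving text support from the schema's class (the isinstance check removed in the previous hunk), the schema now reports its own capabilities, which lets image support slot in without another class check. The pattern in a standalone sketch, with hypothetical stand-in classes rather than the library's real schema types:

# Sketch of the capability-property pattern; these classes are stand-ins.
class OutputSchema:
    allows_text: bool = True
    allows_image: bool = False

class ImageOutputSchema(OutputSchema):
    allows_image = True

schema = ImageOutputSchema()
params = {
    'allow_text_output': schema.allows_text,    # True
    'allow_image_output': schema.allows_image,  # True
}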
@@ -543,27 +540,58 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
         if self._events_iterator is None:
             # Ensure that the stream is only run once

+            output_schema = ctx.deps.output_schema
+
             async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]:  # noqa: C901
+                if not self.model_response.parts:
+                    # we got an empty response.
+                    # this sometimes happens with anthropic (and perhaps other models)
+                    # when the model has already returned text along side tool calls
+                    if text_processor := output_schema.text_processor:
+                        # in this scenario, if text responses are allowed, we return text from the most recent model
+                        # response, if any
+                        for message in reversed(ctx.state.message_history):
+                            if isinstance(message, _messages.ModelResponse):
+                                text = ''
+                                for part in message.parts:
+                                    if isinstance(part, _messages.TextPart):
+                                        text += part.content
+                                    elif isinstance(part, _messages.BuiltinToolCallPart):
+                                        # Text parts before a built-in tool call are essentially thoughts,
+                                        # not part of the final result output, so we reset the accumulated text
+                                        text = ''  # pragma: no cover
+                                if text:
+                                    self._next_node = await self._handle_text_response(ctx, text, text_processor)
+                                    return
+
+                    # Go back to the model request node with an empty request, which means we'll essentially
+                    # resubmit the most recent request that resulted in an empty response,
+                    # as the empty response and request will not create any items in the API payload,
+                    # in the hope the model will return a non-empty response this time.
+                    ctx.state.increment_retries(ctx.deps.max_result_retries)
+                    self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[]))
+                    return
+
                 text = ''
                 tool_calls: list[_messages.ToolCallPart] = []
-                invisible_parts = False
+                files: list[_messages.BinaryContent] = []

                 for part in self.model_response.parts:
                     if isinstance(part, _messages.TextPart):
                         text += part.content
                     elif isinstance(part, _messages.ToolCallPart):
                         tool_calls.append(part)
+                    elif isinstance(part, _messages.FilePart):
+                        files.append(part.content)
                     elif isinstance(part, _messages.BuiltinToolCallPart):
                         # Text parts before a built-in tool call are essentially thoughts,
                         # not part of the final result output, so we reset the accumulated text
                         text = ''
-                        invisible_parts = True
                         yield _messages.BuiltinToolCallEvent(part)  # pyright: ignore[reportDeprecated]
                     elif isinstance(part, _messages.BuiltinToolReturnPart):
-                        invisible_parts = True
                         yield _messages.BuiltinToolResultEvent(part)  # pyright: ignore[reportDeprecated]
                     elif isinstance(part, _messages.ThinkingPart):
-                        invisible_parts = True
+                        pass
                     else:
                         assert_never(part)
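The new empty-response branch salvages text from an earlier response when the schema accepts text, discarding anything that preceded a built-in tool call. The accumulation rule in isolation (plain tuples standing in for the library's part classes):

# Sketch: text before a built-in tool call is treated as a thought and
# dropped; only text after the last built-in tool call counts as output.
parts = [
    ('text', 'Let me look that up. '),
    ('builtin_tool_call', 'web_search'),
    ('text', 'The answer is 42.'),
]

text = ''
for kind, content in parts:
    if kind == 'text':
        text += content
    elif kind == 'builtin_tool_call':
        text = ''  # reset: the preceding text was only a preamble

print(text)  # The answer is 42.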
@@ -572,47 +600,35 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
                 # This accounts for cases like anthropic returns that might contain a text response
                 # and a tool call response, where the text response just indicates the tool call will happen.
                 try:
+                    alternatives: list[str] = []
                     if tool_calls:
                         async for event in self._handle_tool_calls(ctx, tool_calls):
                             yield event
-
-
-
-                    elif invisible_parts:
-                        # handle responses with only thinking or built-in tool parts.
-                        # this can happen with models that support thinking mode when they don't provide
-                        # actionable output alongside their thinking content. so we tell the model to try again.
-                        m = _messages.RetryPromptPart(
-                            content='Responses without text or tool calls are not permitted.',
-                        )
-                        raise ToolRetryError(m)
+                        return
+                    elif output_schema.toolset:
+                        alternatives.append('include your response in a tool call')
                     else:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                        # resubmit the most recent request that resulted in an empty response,
-                        # as the empty response and request will not create any items in the API payload,
-                        # in the hope the model will return a non-empty response this time.
-                        ctx.state.increment_retries(ctx.deps.max_result_retries)
-                        self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[]))
+                        alternatives.append('call a tool')
+
+                    if output_schema.allows_image:
+                        if image := next((file for file in files if isinstance(file, _messages.BinaryImage)), None):
+                            self._next_node = await self._handle_image_response(ctx, image)
+                            return
+                        alternatives.append('return an image')
+
+                    if text_processor := output_schema.text_processor:
+                        if text:
+                            self._next_node = await self._handle_text_response(ctx, text, text_processor)
+                            return
+                        alternatives.insert(0, 'return text')
+
+                    # handle responses with only parts that don't constitute output.
+                    # This can happen with models that support thinking mode when they don't provide
+                    # actionable output alongside their thinking content. so we tell the model to try again.
+                    m = _messages.RetryPromptPart(
+                        content=f'Please {" or ".join(alternatives)}.',
+                    )
+                    raise ToolRetryError(m)
                 except ToolRetryError as e:
                     ctx.state.increment_retries(ctx.deps.max_result_retries, e)
                     self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[e.tool_retry]))
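The fixed retry message is replaced by one assembled from the output channels the run actually supports, so the model is told exactly which responses are acceptable. How the message composes for a run that allows text and tool calls:

# Sketch: the retry message produced by the f-string in this hunk.
alternatives = ['return text', 'call a tool']
print(f'Please {" or ".join(alternatives)}.')
# -> Please return text or call a tool.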
@@ -655,6 +671,28 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
             _messages.ModelRequest(parts=output_parts, instructions=instructions)
         )

+    async def _handle_text_response(
+        self,
+        ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
+        text: str,
+        text_processor: _output.BaseOutputProcessor[NodeRunEndT],
+    ) -> ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]]:
+        run_context = build_run_context(ctx)
+
+        result_data = await text_processor.process(text, run_context)
+
+        for validator in ctx.deps.output_validators:
+            result_data = await validator.validate(result_data, run_context)
+        return self._handle_final_result(ctx, result.FinalResult(result_data), [])
+
+    async def _handle_image_response(
+        self,
+        ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
+        image: _messages.BinaryImage,
+    ) -> ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]]:
+        result_data = cast(NodeRunEndT, image)
+        return self._handle_final_result(ctx, result.FinalResult(result_data), [])
+
     def _handle_final_result(
         self,
         ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
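_handle_image_response is the piece that lets a run finish with an image as its output. A hedged usage sketch, assuming image output is requested via output_type=BinaryImage as the export changes above suggest; the model name and builtin tool are placeholders:

# Sketch only: an agent whose final output is an image.
from pydantic_ai import Agent, BinaryImage, ImageGenerationTool

agent = Agent(
    'openai:gpt-5',
    output_type=BinaryImage,
    builtin_tools=[ImageGenerationTool()],
)
# result = agent.run_sync('Draw a robot.')  # result.output: BinaryImage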
@@ -669,26 +707,6 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):

         return End(final_result)

-    async def _handle_text_response(
-        self,
-        ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
-        text: str,
-    ) -> ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]]:
-        output_schema = ctx.deps.output_schema
-        run_context = build_run_context(ctx)
-
-        if isinstance(output_schema, _output.TextOutputSchema):
-            result_data = await output_schema.process(text, run_context)
-        else:
-            m = _messages.RetryPromptPart(
-                content='Plain text responses are not permitted, please include your response in a tool call',
-            )
-            raise ToolRetryError(m)
-
-        for validator in ctx.deps.output_validators:
-            result_data = await validator.validate(result_data, run_context)
-        return self._handle_final_result(ctx, result.FinalResult(result_data), [])
-
     __repr__ = dataclasses_no_defaults_repr

@@ -821,6 +839,7 @@ async def process_tool_calls(  # noqa: C901
         tool_calls=calls_to_run,
         tool_call_results=calls_to_run_results,
         tracer=ctx.deps.tracer,
+        usage=ctx.state.usage,
         usage_limits=ctx.deps.usage_limits,
         output_parts=output_parts,
         output_deferred_calls=deferred_calls,
@@ -867,7 +886,8 @@ async def _call_tools(
     tool_calls: list[_messages.ToolCallPart],
     tool_call_results: dict[str, DeferredToolResult],
     tracer: Tracer,
-    usage_limits: _usage.UsageLimits,
+    usage: _usage.RunUsage,
+    usage_limits: _usage.UsageLimits,
     output_parts: list[_messages.ModelRequestPart],
     output_deferred_calls: dict[Literal['external', 'unapproved'], list[_messages.ToolCallPart]],
 ) -> AsyncIterator[_messages.HandleResponseEvent]:
@@ -875,6 +895,11 @@ async def _call_tools(
     user_parts_by_index: dict[int, _messages.UserPromptPart] = {}
     deferred_calls_by_index: dict[int, Literal['external', 'unapproved']] = {}

+    if usage_limits.tool_calls_limit is not None:
+        projected_usage = deepcopy(usage)
+        projected_usage.tool_calls += len(tool_calls)
+        usage_limits.check_before_tool_call(projected_usage)
+
     for call in tool_calls:
         yield _messages.FunctionToolCallEvent(call)
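The limit check now happens once, up front: current usage is copied, projected forward by the whole batch, and validated before any tool executes. The same projection in a self-contained sketch (the Usage class and exception here are stand-ins for the library's types):

from copy import deepcopy
from dataclasses import dataclass

@dataclass
class Usage:
    tool_calls: int = 0

def check_before_tool_calls(usage: Usage, pending: int, limit: int) -> None:
    # Mirror of the hunk above: project the batch, then check the limit.
    projected = deepcopy(usage)
    projected.tool_calls += pending
    if projected.tool_calls > limit:
        raise RuntimeError(f'the next {pending} tool calls would exceed the limit of {limit}')

try:
    check_before_tool_calls(Usage(tool_calls=4), pending=2, limit=5)
except RuntimeError as e:
    print(e)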
@@ -888,13 +913,19 @@ async def _call_tools(

     async def handle_call_or_result(
         coro_or_task: Awaitable[
-            tuple[_messages.ToolReturnPart | _messages.RetryPromptPart, _messages.UserPromptPart | None]
+            tuple[
+                _messages.ToolReturnPart | _messages.RetryPromptPart, str | Sequence[_messages.UserContent] | None
+            ]
         ]
-        | Task[tuple[_messages.ToolReturnPart | _messages.RetryPromptPart, _messages.UserPromptPart | None]],
+        | Task[
+            tuple[
+                _messages.ToolReturnPart | _messages.RetryPromptPart, str | Sequence[_messages.UserContent] | None
+            ]
+        ],
         index: int,
     ) -> _messages.HandleResponseEvent | None:
         try:
-            tool_part, user_part = (
+            tool_part, tool_user_content = (
                 (await coro_or_task) if inspect.isawaitable(coro_or_task) else coro_or_task.result()
             )
         except exceptions.CallDeferred:
@@ -903,15 +934,15 @@ async def _call_tools(
             deferred_calls_by_index[index] = 'unapproved'
         else:
             tool_parts_by_index[index] = tool_part
-            if user_part:
-                user_parts_by_index[index] = user_part
+            if tool_user_content:
+                user_parts_by_index[index] = _messages.UserPromptPart(content=tool_user_content)

-            return _messages.FunctionToolResultEvent(tool_part)
+            return _messages.FunctionToolResultEvent(tool_part, content=tool_user_content)

     if tool_manager.should_call_sequentially(tool_calls):
         for index, call in enumerate(tool_calls):
             if event := await handle_call_or_result(
-                _call_tool(tool_manager, call, tool_call_results.get(call.tool_call_id), usage_limits),
+                _call_tool(tool_manager, call, tool_call_results.get(call.tool_call_id)),
                 index,
             ):
                 yield event
@@ -919,7 +950,7 @@ async def _call_tools(
     else:
         tasks = [
             asyncio.create_task(
-                _call_tool(tool_manager, call, tool_call_results.get(call.tool_call_id), usage_limits),
+                _call_tool(tool_manager, call, tool_call_results.get(call.tool_call_id)),
                 name=call.tool_name,
             )
             for call in tool_calls
@@ -946,15 +977,14 @@ async def _call_tool(
     tool_manager: ToolManager[DepsT],
     tool_call: _messages.ToolCallPart,
     tool_call_result: DeferredToolResult | None,
-    usage_limits: _usage.UsageLimits,
-) -> tuple[_messages.ToolReturnPart | _messages.RetryPromptPart, _messages.UserPromptPart | None]:
+) -> tuple[_messages.ToolReturnPart | _messages.RetryPromptPart, str | Sequence[_messages.UserContent] | None]:
     try:
         if tool_call_result is None:
-            tool_result = await tool_manager.handle_call(tool_call, usage_limits=usage_limits)
+            tool_result = await tool_manager.handle_call(tool_call)
         elif isinstance(tool_call_result, ToolApproved):
             if tool_call_result.override_args is not None:
                 tool_call = dataclasses.replace(tool_call, args=tool_call_result.override_args)
-            tool_result = await tool_manager.handle_call(tool_call, usage_limits=usage_limits)
+            tool_result = await tool_manager.handle_call(tool_call)
         elif isinstance(tool_call_result, ToolDenied):
             return _messages.ToolReturnPart(
                 tool_name=tool_call.tool_name,
@@ -1024,14 +1054,7 @@ async def _call_tool(
             metadata=tool_return.metadata,
         )

-        user_part: _messages.UserPromptPart | None = None
-        if tool_return.content:
-            user_part = _messages.UserPromptPart(
-                content=tool_return.content,
-                part_kind='user-prompt',
-            )
-
-        return return_part, user_part
+        return return_part, tool_return.content or None


 @dataclasses.dataclass
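Across the last few hunks, a tool's extra content travels as raw user content (str | Sequence[UserContent]) and is only wrapped in a UserPromptPart where it joins the next request; it now also rides along on FunctionToolResultEvent. On the tool side this is the documented ToolReturn shape:

# Sketch: a tool that sends extra content back to the model alongside
# its return value, using the ToolReturn message type.
from pydantic_ai.messages import ToolReturn

def take_screenshot() -> ToolReturn:
    return ToolReturn(
        return_value='screenshot captured',
        content=['Here is the screenshot you asked for.'],
    )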
{pydantic_ai_slim-1.0.14 → pydantic_ai_slim-1.0.15}/pydantic_ai/_cli.py
@@ -18,7 +18,7 @@ from . import __version__
 from ._run_context import AgentDepsT
 from .agent import AbstractAgent, Agent
 from .exceptions import UserError
-from .messages import ModelMessage,
+from .messages import ModelMessage, ModelResponse
 from .models import KnownModelName, infer_model
 from .output import OutputDataT

@@ -351,14 +351,11 @@ def handle_slash_command(
         console.print('[dim]Exiting…[/dim]')
         return 0, multiline
     elif ident_prompt == '/cp':
-        try:
-            parts = messages[-1].parts
-        except IndexError:
+        if not messages or not isinstance(messages[-1], ModelResponse):
             console.print('[dim]No output available to copy.[/dim]')
         else:
-            text_to_copy =
-            text_to_copy
-            if text_to_copy:
+            text_to_copy = messages[-1].text
+            if text_to_copy and (text_to_copy := text_to_copy.strip()):
                 pyperclip.copy(text_to_copy)
                 console.print('[dim]Copied last output to clipboard.[/dim]')
             else: