pydantic-ai-slim 0.7.1__tar.gz → 0.7.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pydantic-ai-slim might be problematic.
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/PKG-INFO +10 -4
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/_agent_graph.py +60 -57
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/_cli.py +18 -3
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/_parts_manager.py +5 -4
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/_run_context.py +2 -2
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/_tool_manager.py +50 -29
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/ag_ui.py +4 -4
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/agent/__init__.py +69 -84
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/agent/abstract.py +16 -18
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/agent/wrapper.py +4 -6
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/direct.py +4 -4
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/durable_exec/temporal/_agent.py +13 -15
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/durable_exec/temporal/_model.py +2 -2
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/messages.py +16 -6
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/models/__init__.py +5 -5
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/models/anthropic.py +47 -46
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/models/bedrock.py +25 -27
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/models/cohere.py +20 -25
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/models/fallback.py +15 -15
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/models/function.py +7 -9
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/models/gemini.py +43 -39
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/models/google.py +59 -40
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/models/groq.py +23 -19
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/models/huggingface.py +27 -23
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/models/instrumented.py +4 -4
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/models/mcp_sampling.py +1 -2
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/models/mistral.py +24 -22
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/models/openai.py +101 -45
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/models/test.py +4 -5
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/profiles/__init__.py +10 -1
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/profiles/deepseek.py +1 -1
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/profiles/moonshotai.py +1 -1
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/profiles/openai.py +13 -3
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/profiles/qwen.py +4 -1
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/__init__.py +4 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/huggingface.py +27 -0
- pydantic_ai_slim-0.7.3/pydantic_ai/providers/ollama.py +105 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/openai.py +1 -1
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/openrouter.py +2 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/result.py +6 -6
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/run.py +4 -11
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/tools.py +9 -9
- pydantic_ai_slim-0.7.3/pydantic_ai/usage.py +315 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pyproject.toml +10 -3
- pydantic_ai_slim-0.7.1/pydantic_ai/usage.py +0 -153
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/.gitignore +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/LICENSE +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/README.md +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/__main__.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/_a2a.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/_function_schema.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/_mcp.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/_output.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/_thinking_part.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/_utils.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/builtin_tools.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/durable_exec/__init__.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/durable_exec/temporal/__init__.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/durable_exec/temporal/_function_toolset.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/durable_exec/temporal/_logfire.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/durable_exec/temporal/_mcp_server.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/durable_exec/temporal/_run_context.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/durable_exec/temporal/_toolset.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/ext/__init__.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/ext/aci.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/ext/langchain.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/format_prompt.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/mcp.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/output.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/profiles/_json_schema.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/profiles/amazon.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/profiles/anthropic.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/profiles/cohere.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/profiles/google.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/profiles/grok.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/profiles/groq.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/profiles/meta.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/profiles/mistral.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/anthropic.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/azure.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/cohere.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/fireworks.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/github.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/google.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/grok.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/groq.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/heroku.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/mistral.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/moonshotai.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/together.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/providers/vercel.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/retries.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/toolsets/__init__.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/toolsets/_dynamic.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/toolsets/abstract.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/toolsets/combined.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/toolsets/deferred.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/toolsets/filtered.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/toolsets/function.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/toolsets/prefixed.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/toolsets/prepared.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/toolsets/renamed.py +0 -0
- {pydantic_ai_slim-0.7.1 → pydantic_ai_slim-0.7.3}/pydantic_ai/toolsets/wrapper.py +0 -0
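The headline additions are a new `pydantic_ai/providers/ollama.py` and a rewritten `pydantic_ai/usage.py` (the 0.7.1 `Usage` class gives way to `RunUsage`, a rename that threads through most of the hunks below). As a hedged sketch of how the new provider is presumably wired up: the constructor arguments and model name here are assumptions based on the package's other OpenAI-compatible providers, not something this diff confirms.

```python
# Assumed API, for illustration only: an Ollama server exposed through the
# OpenAI-compatible model class, using the new OllamaProvider.
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIModel
from pydantic_ai.providers.ollama import OllamaProvider

model = OpenAIModel(
    'qwen3',  # any model pulled into the local Ollama instance
    provider=OllamaProvider(base_url='http://localhost:11434/v1'),
)
agent = Agent(model)
```

The key hunks follow.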
PKG-INFO:

@@ -1,7 +1,11 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.7.1
+Version: 0.7.3
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
+Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
+Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
+Project-URL: Documentation, https://ai.pydantic.dev/install/#slim-install
+Project-URL: Changelog, https://github.com/pydantic/pydantic-ai/releases
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>, Douwe Maan <douwe@pydantic.dev>
 License-Expression: MIT
 License-File: LICENSE
@@ -27,10 +31,11 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.9
 Requires-Dist: eval-type-backport>=0.2.0
 Requires-Dist: exceptiongroup; python_version < '3.11'
+Requires-Dist: genai-prices>=0.0.22
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.7.1
+Requires-Dist: pydantic-graph==0.7.3
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -45,13 +50,14 @@ Requires-Dist: boto3>=1.39.0; extra == 'bedrock'
 Provides-Extra: cli
 Requires-Dist: argcomplete>=3.5.0; extra == 'cli'
 Requires-Dist: prompt-toolkit>=3; extra == 'cli'
+Requires-Dist: pyperclip>=1.9.0; extra == 'cli'
 Requires-Dist: rich>=13; extra == 'cli'
 Provides-Extra: cohere
 Requires-Dist: cohere>=5.16.0; (platform_system != 'Emscripten') and extra == 'cohere'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.7.1; extra == 'evals'
+Requires-Dist: pydantic-evals==0.7.3; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.28.0; extra == 'google'
 Provides-Extra: groq
@@ -71,7 +77,7 @@ Requires-Dist: tenacity>=8.2.3; extra == 'retries'
 Provides-Extra: tavily
 Requires-Dist: tavily-python>=0.5.0; extra == 'tavily'
 Provides-Extra: temporal
-Requires-Dist: temporalio
+Requires-Dist: temporalio==1.15.0; extra == 'temporal'
 Provides-Extra: vertexai
 Requires-Dist: google-auth>=2.36.0; extra == 'vertexai'
 Requires-Dist: requests>=2.32.2; extra == 'vertexai'
pydantic_ai/_agent_graph.py:

@@ -23,7 +23,7 @@ from pydantic_graph.nodes import End, NodeRunEndT
 from . import _output, _system_prompt, exceptions, messages as _messages, models, result, usage as _usage
 from .exceptions import ToolRetryError
 from .output import OutputDataT, OutputSpec
-from .settings import ModelSettings
+from .settings import ModelSettings
 from .tools import RunContext, ToolDefinition, ToolKind
 
 if TYPE_CHECKING:
@@ -76,7 +76,7 @@ class GraphAgentState:
     """State kept across the execution of the agent graph."""
 
     message_history: list[_messages.ModelMessage]
-    usage: _usage.Usage
+    usage: _usage.RunUsage
     retries: int
     run_step: int
 
@@ -158,28 +158,7 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
 
     async def run(
         self, ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]]
-    ) -> ModelRequestNode[DepsT, NodeRunEndT]:
-        return ModelRequestNode[DepsT, NodeRunEndT](request=await self._get_first_message(ctx))
-
-    async def _get_first_message(
-        self, ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]]
-    ) -> _messages.ModelRequest:
-        run_context = build_run_context(ctx)
-        history, next_message = await self._prepare_messages(
-            self.user_prompt, ctx.state.message_history, ctx.deps.get_instructions, run_context
-        )
-        ctx.state.message_history = history
-        run_context.messages = history
-
-        return next_message
-
-    async def _prepare_messages(
-        self,
-        user_prompt: str | Sequence[_messages.UserContent] | None,
-        message_history: list[_messages.ModelMessage] | None,
-        get_instructions: Callable[[RunContext[DepsT]], Awaitable[str | None]],
-        run_context: RunContext[DepsT],
-    ) -> tuple[list[_messages.ModelMessage], _messages.ModelRequest]:
+    ) -> Union[ModelRequestNode[DepsT, NodeRunEndT], CallToolsNode[DepsT, NodeRunEndT]]:  # noqa: UP007
         try:
             ctx_messages = get_captured_run_messages()
         except LookupError:
@@ -191,29 +170,48 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
             messages = ctx_messages.messages
             ctx_messages.used = True
 
+        # Add message history to the `capture_run_messages` list, which will be empty at this point
+        messages.extend(ctx.state.message_history)
+        # Use the `capture_run_messages` list as the message history so that new messages are added to it
+        ctx.state.message_history = messages
+
+        run_context = build_run_context(ctx)
+
         parts: list[_messages.ModelRequestPart] = []
-
-        if message_history:
-            # Shallow copy messages
-            messages.extend(message_history)
+        if messages:
             # Reevaluate any dynamic system prompt parts
             await self._reevaluate_dynamic_prompts(messages, run_context)
         else:
             parts.extend(await self._sys_parts(run_context))
 
-        if
-
-
-
-
-
-
-
-
-
-
+        if messages and (last_message := messages[-1]):
+            if isinstance(last_message, _messages.ModelRequest) and self.user_prompt is None:
+                # Drop last message from history and reuse its parts
+                messages.pop()
+                parts.extend(last_message.parts)
+            elif isinstance(last_message, _messages.ModelResponse):
+                if self.user_prompt is None:
+                    # `CallToolsNode` requires the tool manager to be prepared for the run step
+                    # This will raise errors for any tool name conflicts
+                    ctx.deps.tool_manager = await ctx.deps.tool_manager.for_run_step(run_context)
+
+                    # Skip ModelRequestNode and go directly to CallToolsNode
+                    return CallToolsNode[DepsT, NodeRunEndT](model_response=last_message)
+                elif any(isinstance(part, _messages.ToolCallPart) for part in last_message.parts):
+                    raise exceptions.UserError(
+                        'Cannot provide a new user prompt when the message history ends with '
+                        'a model response containing unprocessed tool calls. Either process the '
+                        'tool calls first (by calling `iter` with `user_prompt=None`) or append a '
+                        '`ModelRequest` with `ToolResultPart`s.'
+                    )
+
+        if self.user_prompt is not None:
+            parts.append(_messages.UserPromptPart(self.user_prompt))
+
+        instructions = await ctx.deps.get_instructions(run_context)
+        next_message = _messages.ModelRequest(parts, instructions=instructions)
 
-        return
+        return ModelRequestNode[DepsT, NodeRunEndT](request=next_message)
 
     async def _reevaluate_dynamic_prompts(
         self, messages: list[_messages.ModelMessage], run_context: RunContext[DepsT]
@@ -250,9 +248,6 @@ async def _prepare_request_parameters(
     ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
 ) -> models.ModelRequestParameters:
     """Build tools and create an agent model."""
-    run_context = build_run_context(ctx)
-    ctx.deps.tool_manager = await ctx.deps.tool_manager.for_run_step(run_context)
-
     output_schema = ctx.deps.output_schema
     output_object = None
     if isinstance(output_schema, _output.NativeOutputSchema):
@@ -342,7 +337,7 @@ class ModelRequestNode(AgentNode[DepsT, NodeRunEndT]):
 
         model_settings, model_request_parameters, message_history, _ = await self._prepare_request(ctx)
         model_response = await ctx.deps.model.request(message_history, model_settings, model_request_parameters)
-        ctx.state.usage.
+        ctx.state.usage.requests += 1
 
         return self._finish_handling(ctx, model_response)
 
@@ -355,21 +350,21 @@ class ModelRequestNode(AgentNode[DepsT, NodeRunEndT]):
 
         run_context = build_run_context(ctx)
 
-
+        # This will raise errors for any tool name conflicts
+        ctx.deps.tool_manager = await ctx.deps.tool_manager.for_run_step(run_context)
+
+        message_history = await _process_message_history(ctx.state, ctx.deps.history_processors, run_context)
 
         model_request_parameters = await _prepare_request_parameters(ctx)
         model_request_parameters = ctx.deps.model.customize_request_parameters(model_request_parameters)
 
-
-
+        model_settings = ctx.deps.model_settings
         usage = ctx.state.usage
         if ctx.deps.usage_limits.count_tokens_before_request:
             # Copy to avoid modifying the original usage object with the counted usage
             usage = dataclasses.replace(usage)
 
-            counted_usage = await ctx.deps.model.count_tokens(
-                message_history, ctx.deps.model_settings, model_request_parameters
-            )
+            counted_usage = await ctx.deps.model.count_tokens(message_history, model_settings, model_request_parameters)
             usage.incr(counted_usage)
 
         ctx.deps.usage_limits.check_before_request(usage)
@@ -432,9 +427,11 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
         if self._events_iterator is None:
             # Ensure that the stream is only run once
 
-            async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]:
+            async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]:  # noqa: C901
                 texts: list[str] = []
                 tool_calls: list[_messages.ToolCallPart] = []
+                thinking_parts: list[_messages.ThinkingPart] = []
+
                 for part in self.model_response.parts:
                     if isinstance(part, _messages.TextPart):
                         # ignore empty content for text parts, see #437
@@ -447,11 +444,7 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
                     elif isinstance(part, _messages.BuiltinToolReturnPart):
                         yield _messages.BuiltinToolResultEvent(part)
                     elif isinstance(part, _messages.ThinkingPart):
-
-                        # We need to handle text parts in case there are no tool calls and/or the desired output comes
-                        # from the text, but thinking parts should not directly influence the execution of tools or
-                        # determination of the next node of graph execution here.
-                        pass
+                        thinking_parts.append(part)
                     else:
                         assert_never(part)
 
@@ -465,8 +458,18 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
                 elif texts:
                     # No events are emitted during the handling of text responses, so we don't need to yield anything
                     self._next_node = await self._handle_text_response(ctx, texts)
+                elif thinking_parts:
+                    # handle thinking-only responses (responses that contain only ThinkingPart instances)
+                    # this can happen with models that support thinking mode when they don't provide
+                    # actionable output alongside their thinking content.
+                    self._next_node = ModelRequestNode[DepsT, NodeRunEndT](
+                        _messages.ModelRequest(
+                            parts=[_messages.RetryPromptPart('Responses without text or tool calls are not permitted.')]
+                        )
+                    )
                 else:
-                    # we
+                    # we got an empty response with no tool calls, text, or thinking
+                    # this sometimes happens with anthropic (and perhaps other models)
                     # when the model has already returned text along side tool calls
                     # in this scenario, if text responses are allowed, we return text from the most recent model
                     # response, if any
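The practical upshot of the rewritten `UserPromptNode.run` above is that a run can now resume from a message history ending in a `ModelResponse`: with `user_prompt=None` the graph skips `ModelRequestNode` and goes straight to `CallToolsNode`, while supplying a new prompt on top of unprocessed tool calls raises `UserError`. A minimal sketch of the resume path, with `TestModel` standing in for a real model and `history` as a placeholder:

```python
# Minimal sketch of the new resume behavior in 0.7.3.
from pydantic_ai import Agent
from pydantic_ai.models.test import TestModel

agent = Agent(TestModel())

async def resume(history):
    # history: list[ModelMessage] ending in a ModelResponse with tool calls.
    # user_prompt=None processes the pending tool calls (CallToolsNode);
    # a new prompt here would raise UserError per the hunk above.
    result = await agent.run(None, message_history=history)
    return result.output
```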
pydantic_ai/_cli.py:

@@ -18,12 +18,13 @@ from . import __version__
 from ._run_context import AgentDepsT
 from .agent import AbstractAgent, Agent
 from .exceptions import UserError
-from .messages import ModelMessage
+from .messages import ModelMessage, TextPart
 from .models import KnownModelName, infer_model
 from .output import OutputDataT
 
 try:
     import argcomplete
+    import pyperclip
     from prompt_toolkit import PromptSession
     from prompt_toolkit.auto_suggest import AutoSuggestFromHistory, Suggestion
     from prompt_toolkit.buffer import Buffer
@@ -38,7 +39,7 @@ try:
     from rich.text import Text
 except ImportError as _import_error:
     raise ImportError(
-        'Please install `rich`, `prompt-toolkit` and `argcomplete` to use the Pydantic AI CLI, '
+        'Please install `rich`, `prompt-toolkit`, `pyperclip` and `argcomplete` to use the Pydantic AI CLI, '
        'you can use the `cli` optional group — `pip install "pydantic-ai-slim[cli]"`'
     ) from _import_error
 
@@ -114,6 +115,7 @@ Special prompts:
 * `/exit` - exit the interactive mode (ctrl-c and ctrl-d also work)
 * `/markdown` - show the last markdown output of the last question
 * `/multiline` - toggle multiline mode
+* `/cp` - copy the last response to clipboard
 """,
         formatter_class=argparse.RawTextHelpFormatter,
     )
@@ -237,7 +239,7 @@ async def run_chat(
 
     while True:
         try:
-            auto_suggest = CustomAutoSuggest(['/markdown', '/multiline', '/exit'])
+            auto_suggest = CustomAutoSuggest(['/markdown', '/multiline', '/exit', '/cp'])
             text = await session.prompt_async(f'{prog_name} ➤ ', auto_suggest=auto_suggest, multiline=multiline)
         except (KeyboardInterrupt, EOFError):  # pragma: no cover
             return 0
@@ -347,6 +349,19 @@ def handle_slash_command(
     elif ident_prompt == '/exit':
         console.print('[dim]Exiting…[/dim]')
         return 0, multiline
+    elif ident_prompt == '/cp':
+        try:
+            parts = messages[-1].parts
+        except IndexError:
+            console.print('[dim]No output available to copy.[/dim]')
+        else:
+            text_to_copy = '\n\n'.join(part.content for part in parts if isinstance(part, TextPart))
+            text_to_copy = text_to_copy.strip()
+            if text_to_copy:
+                pyperclip.copy(text_to_copy)
+                console.print('[dim]Copied last output to clipboard.[/dim]')
+            else:
+                console.print('[dim]No text content to copy.[/dim]')
     else:
         console.print(f'[red]Unknown command[/red] [magenta]`{ident_prompt}`[/magenta]')
     return None, multiline
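Outside the CLI plumbing, the whole of `/cp` boils down to a few lines; this standalone sketch mirrors the hunk above:

```python
# Join the TextParts of the last message and put them on the clipboard.
import pyperclip
from pydantic_ai.messages import ModelMessage, TextPart

def copy_last_response(messages: list[ModelMessage]) -> bool:
    if not messages:
        return False  # nothing to copy
    text = '\n\n'.join(
        part.content for part in messages[-1].parts if isinstance(part, TextPart)
    ).strip()
    if not text:
        return False  # last message had no text content
    pyperclip.copy(text)
    return True
```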
pydantic_ai/_parts_manager.py:

@@ -72,6 +72,7 @@ class ModelResponsePartsManager:
         vendor_part_id: VendorId | None,
         content: str,
         thinking_tags: tuple[str, str] | None = None,
+        ignore_leading_whitespace: bool = False,
     ) -> ModelResponseStreamEvent | None:
         """Handle incoming text content, creating or updating a TextPart in the manager as appropriate.
 
@@ -85,6 +86,7 @@ class ModelResponsePartsManager:
                 a TextPart.
             content: The text content to append to the appropriate TextPart.
             thinking_tags: If provided, will handle content between the thinking tags as thinking parts.
+            ignore_leading_whitespace: If True, will ignore leading whitespace in the content.
 
         Returns:
             - A `PartStartEvent` if a new part was created.
@@ -128,10 +130,9 @@ class ModelResponsePartsManager:
                 return self.handle_thinking_delta(vendor_part_id=vendor_part_id, content='')
 
         if existing_text_part_and_index is None:
-            #
-            #
-
-            if content.isspace():
+            # This is a workaround for models that emit `<think>\n</think>\n\n` or an empty text part ahead of tool calls (e.g. Ollama + Qwen3),
+            # which we don't want to end up treating as a final result when using `run_stream` with `str` as a valid `output_type`.
+            if ignore_leading_whitespace and (len(content) == 0 or content.isspace()):
                 return None
 
         # There is no existing text part that should be updated, so create a new one
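In isolation the new flag behaves like this: a leading whitespace-only delta is swallowed rather than opening an empty `TextPart`. Note that `_parts_manager` is a private module, so this is a sketch of the behavior, not a supported API:

```python
# A leading whitespace-only delta yields no event when the flag is set.
from pydantic_ai._parts_manager import ModelResponsePartsManager

manager = ModelResponsePartsManager()
event = manager.handle_text_delta(
    vendor_part_id='content',
    content='\n\n',  # e.g. the `<think>\n</think>\n\n` residue mentioned above
    ignore_leading_whitespace=True,
)
assert event is None  # no empty TextPart is started for this chunk
```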
pydantic_ai/_run_context.py:

@@ -12,7 +12,7 @@ from . import _utils, messages as _messages
 
 if TYPE_CHECKING:
     from .models import Model
-    from .result import Usage
+    from .result import RunUsage
 
 AgentDepsT = TypeVar('AgentDepsT', default=None, contravariant=True)
 """Type variable for agent dependencies."""
@@ -26,7 +26,7 @@ class RunContext(Generic[AgentDepsT]):
     """Dependencies for the agent."""
     model: Model
     """The model used in this run."""
-    usage: Usage
+    usage: RunUsage
     """LLM usage associated with the run."""
     prompt: str | Sequence[_messages.UserContent] | None = None
     """The original user prompt passed to the run."""
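`RunContext.usage` is now typed as `RunUsage`, the same rename seen in `_agent_graph.py` above. Tools that inspect usage keep the same shape; a small sketch:

```python
# Sketch: reading cumulative usage from inside a tool via ctx.usage (a RunUsage).
from pydantic_ai import Agent, RunContext
from pydantic_ai.models.test import TestModel

agent = Agent(TestModel())

@agent.tool
def requests_so_far(ctx: RunContext[None]) -> int:
    # `requests` is the per-run counter incremented in _agent_graph.py above.
    return ctx.usage.requests
```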
pydantic_ai/_tool_manager.py:

@@ -5,6 +5,7 @@ from collections.abc import Iterable
 from dataclasses import dataclass, field, replace
 from typing import Any, Generic
 
+from opentelemetry.trace import Tracer
 from pydantic import ValidationError
 from typing_extensions import assert_never
 
@@ -21,41 +22,46 @@ from .toolsets.abstract import AbstractToolset, ToolsetTool
 class ToolManager(Generic[AgentDepsT]):
     """Manages tools for an agent run step. It caches the agent run's toolset's tool definitions and handles calling tools and retries."""
 
-    ctx: RunContext[AgentDepsT]
-    """The agent run context for a specific run step."""
     toolset: AbstractToolset[AgentDepsT]
     """The toolset that provides the tools for this run step."""
-
+    ctx: RunContext[AgentDepsT] | None = None
+    """The agent run context for a specific run step."""
+    tools: dict[str, ToolsetTool[AgentDepsT]] | None = None
     """The cached tools for this run step."""
     failed_tools: set[str] = field(default_factory=set)
     """Names of tools that failed in this run step."""
 
-    @classmethod
-    async def build(cls, toolset: AbstractToolset[AgentDepsT], ctx: RunContext[AgentDepsT]) -> ToolManager[AgentDepsT]:
-        """Build a new tool manager for a specific run step."""
-        return cls(
-            ctx=ctx,
-            toolset=toolset,
-            tools=await toolset.get_tools(ctx),
-        )
-
     async def for_run_step(self, ctx: RunContext[AgentDepsT]) -> ToolManager[AgentDepsT]:
         """Build a new tool manager for the next run step, carrying over the retries from the current run step."""
-        if ctx
-
-
-
-
-
-
+        if self.ctx is not None:
+            if ctx.run_step == self.ctx.run_step:
+                return self
+
+            retries = {
+                failed_tool_name: self.ctx.retries.get(failed_tool_name, 0) + 1
+                for failed_tool_name in self.failed_tools
+            }
+            ctx = replace(ctx, retries=retries)
+
+        return self.__class__(
+            toolset=self.toolset,
+            ctx=ctx,
+            tools=await self.toolset.get_tools(ctx),
+        )
 
     @property
     def tool_defs(self) -> list[ToolDefinition]:
         """The tool definitions for the tools in this tool manager."""
+        if self.tools is None:
+            raise ValueError('ToolManager has not been prepared for a run step yet')  # pragma: no cover
+
         return [tool.tool_def for tool in self.tools.values()]
 
     def get_tool_def(self, name: str) -> ToolDefinition | None:
         """Get the tool definition for a given tool name, or `None` if the tool is unknown."""
+        if self.tools is None:
+            raise ValueError('ToolManager has not been prepared for a run step yet')  # pragma: no cover
+
         try:
             return self.tools[name].tool_def
         except KeyError:
@@ -71,15 +77,25 @@ class ToolManager(Generic[AgentDepsT]):
             allow_partial: Whether to allow partial validation of the tool arguments.
             wrap_validation_errors: Whether to wrap validation errors in a retry prompt part.
         """
+        if self.tools is None or self.ctx is None:
+            raise ValueError('ToolManager has not been prepared for a run step yet')  # pragma: no cover
+
         if (tool := self.tools.get(call.tool_name)) and tool.tool_def.kind == 'output':
             # Output tool calls are not traced
             return await self._call_tool(call, allow_partial, wrap_validation_errors)
         else:
-            return await self._call_tool_traced(
+            return await self._call_tool_traced(
+                call,
+                allow_partial,
+                wrap_validation_errors,
+                self.ctx.tracer,
+                self.ctx.trace_include_content,
+            )
+
+    async def _call_tool(self, call: ToolCallPart, allow_partial: bool, wrap_validation_errors: bool) -> Any:
+        if self.tools is None or self.ctx is None:
+            raise ValueError('ToolManager has not been prepared for a run step yet')  # pragma: no cover
 
-    async def _call_tool(
-        self, call: ToolCallPart, allow_partial: bool = False, wrap_validation_errors: bool = True
-    ) -> Any:
         name = call.tool_name
         tool = self.tools.get(name)
         try:
@@ -137,14 +153,19 @@ class ToolManager(Generic[AgentDepsT]):
             raise e
 
     async def _call_tool_traced(
-        self,
+        self,
+        call: ToolCallPart,
+        allow_partial: bool,
+        wrap_validation_errors: bool,
+        tracer: Tracer,
+        include_content: bool = False,
     ) -> Any:
         """See <https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#execute-tool-span>."""
         span_attributes = {
             'gen_ai.tool.name': call.tool_name,
             # NOTE: this means `gen_ai.tool.call.id` will be included even if it was generated by pydantic-ai
             'gen_ai.tool.call.id': call.tool_call_id,
-            **({'tool_arguments': call.args_as_json_str()} if
+            **({'tool_arguments': call.args_as_json_str()} if include_content else {}),
             'logfire.msg': f'running tool: {call.tool_name}',
             # add the JSON schema so these attributes are formatted nicely in Logfire
             'logfire.json_schema': json.dumps(
@@ -156,7 +177,7 @@ class ToolManager(Generic[AgentDepsT]):
                         'tool_arguments': {'type': 'object'},
                         'tool_response': {'type': 'object'},
                     }
-                    if
+                    if include_content
                    else {}
                 ),
                 'gen_ai.tool.name': {},
@@ -165,16 +186,16 @@ class ToolManager(Generic[AgentDepsT]):
                 }
             ),
         }
-        with
+        with tracer.start_as_current_span('running tool', attributes=span_attributes) as span:
             try:
                 tool_result = await self._call_tool(call, allow_partial, wrap_validation_errors)
             except ToolRetryError as e:
                 part = e.tool_retry
-                if
+                if include_content and span.is_recording():
                     span.set_attribute('tool_response', part.model_response())
                 raise e
 
-            if
+            if include_content and span.is_recording():
                 span.set_attribute(
                     'tool_response',
                     tool_result
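`ToolManager` is now two-phase: it is constructed unprepared (`ctx` and `tools` default to `None`), then bound to each run step via `for_run_step`, which also carries retry counts forward for tools that failed in the previous step. A sketch of the lifecycle, noting this is a private API and the arguments are placeholders:

```python
# Sketch of the two-phase lifecycle (private API; toolset/run_context are placeholders).
from pydantic_ai._tool_manager import ToolManager

async def prepare(toolset, run_context):
    manager = ToolManager(toolset=toolset)  # unprepared: ctx and tools are None
    manager = await manager.for_run_step(run_context)  # fetches tools; may raise on name conflicts
    return manager.tool_defs  # would raise ValueError on an unprepared manager
```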
pydantic_ai/ag_ui.py:

@@ -51,7 +51,7 @@ from .settings import ModelSettings
 from .tools import AgentDepsT, ToolDefinition
 from .toolsets import AbstractToolset
 from .toolsets.deferred import DeferredToolset
-from .usage import Usage, UsageLimits
+from .usage import RunUsage, UsageLimits
 
 try:
     from ag_ui.core import (
@@ -127,7 +127,7 @@ class AGUIApp(Generic[AgentDepsT, OutputDataT], Starlette):
         deps: AgentDepsT = None,
         model_settings: ModelSettings | None = None,
         usage_limits: UsageLimits | None = None,
-        usage: Usage | None = None,
+        usage: RunUsage | None = None,
         infer_name: bool = True,
         toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
         # Starlette parameters.
@@ -216,7 +216,7 @@ async def handle_ag_ui_request(
     deps: AgentDepsT = None,
     model_settings: ModelSettings | None = None,
     usage_limits: UsageLimits | None = None,
-    usage: Usage | None = None,
+    usage: RunUsage | None = None,
     infer_name: bool = True,
     toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
 ) -> Response:
@@ -277,7 +277,7 @@ async def run_ag_ui(
     deps: AgentDepsT = None,
     model_settings: ModelSettings | None = None,
     usage_limits: UsageLimits | None = None,
-    usage: Usage | None = None,
+    usage: RunUsage | None = None,
     infer_name: bool = True,
     toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
 ) -> AsyncIterator[str]:
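The AG-UI entry points follow the same rename: a pre-seeded usage object passed to `run_ag_ui` (or `handle_ag_ui_request`/`AGUIApp`) is now a `RunUsage`. A sketch against the signature above, where `run_input` is a placeholder for an AG-UI `RunAgentInput`:

```python
# Sketch: seeding an AG-UI run with an existing RunUsage (per the signature above).
from pydantic_ai.ag_ui import run_ag_ui
from pydantic_ai.usage import RunUsage

async def stream_events(agent, run_input):
    async for event in run_ag_ui(agent, run_input, usage=RunUsage()):
        yield event  # AG-UI event strings
```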