pydantic-ai-slim 1.0.1__tar.gz → 1.0.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: the registry flags this version of pydantic-ai-slim as potentially problematic.
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/PKG-INFO +8 -6
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/_agent_graph.py +50 -31
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/_output.py +19 -7
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/_parts_manager.py +8 -10
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/_tool_manager.py +21 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/ag_ui.py +32 -17
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/agent/__init__.py +3 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/agent/abstract.py +8 -0
- pydantic_ai_slim-1.0.3/pydantic_ai/durable_exec/dbos/__init__.py +6 -0
- pydantic_ai_slim-1.0.3/pydantic_ai/durable_exec/dbos/_agent.py +721 -0
- pydantic_ai_slim-1.0.3/pydantic_ai/durable_exec/dbos/_mcp_server.py +89 -0
- pydantic_ai_slim-1.0.3/pydantic_ai/durable_exec/dbos/_model.py +137 -0
- pydantic_ai_slim-1.0.3/pydantic_ai/durable_exec/dbos/_utils.py +10 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/temporal/_agent.py +1 -1
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/mcp.py +1 -1
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/messages.py +42 -6
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/__init__.py +8 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/anthropic.py +79 -25
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/bedrock.py +82 -31
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/cohere.py +39 -13
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/function.py +8 -1
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/google.py +105 -37
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/groq.py +35 -7
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/huggingface.py +27 -5
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/instrumented.py +27 -14
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/mistral.py +54 -20
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/openai.py +151 -57
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/openai.py +7 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/bedrock.py +20 -4
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/settings.py +1 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/tools.py +11 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/function.py +7 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pyproject.toml +5 -3
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/.gitignore +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/LICENSE +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/README.md +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/__main__.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/_a2a.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/_cli.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/_function_schema.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/_mcp.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/_otel_messages.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/_run_context.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/_thinking_part.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/_utils.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/agent/wrapper.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/builtin_tools.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/direct.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/__init__.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/temporal/__init__.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/temporal/_function_toolset.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/temporal/_logfire.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/temporal/_mcp_server.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/temporal/_model.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/temporal/_run_context.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/durable_exec/temporal/_toolset.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/ext/__init__.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/ext/aci.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/ext/langchain.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/format_prompt.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/fallback.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/gemini.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/mcp_sampling.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/test.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/output.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/__init__.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/_json_schema.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/amazon.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/anthropic.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/cohere.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/deepseek.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/google.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/grok.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/groq.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/harmony.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/meta.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/mistral.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/moonshotai.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/profiles/qwen.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/__init__.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/anthropic.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/azure.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/cerebras.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/cohere.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/fireworks.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/github.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/google.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/grok.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/groq.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/heroku.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/huggingface.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/litellm.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/mistral.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/moonshotai.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/ollama.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/openai.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/openrouter.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/together.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/providers/vercel.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/result.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/retries.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/run.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/__init__.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/_dynamic.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/abstract.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/approval_required.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/combined.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/external.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/filtered.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/prefixed.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/prepared.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/renamed.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/toolsets/wrapper.py +0 -0
- {pydantic_ai_slim-1.0.1 → pydantic_ai_slim-1.0.3}/pydantic_ai/usage.py +0 -0
--- pydantic_ai_slim-1.0.1/PKG-INFO
+++ pydantic_ai_slim-1.0.3/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 1.0.1
+Version: 1.0.3
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -29,11 +29,11 @@ Classifier: Topic :: Internet
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.10
 Requires-Dist: exceptiongroup; python_version < '3.11'
-Requires-Dist: genai-prices>=0.0.
+Requires-Dist: genai-prices>=0.0.23
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==1.0.
+Requires-Dist: pydantic-graph==1.0.3
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -51,11 +51,13 @@ Requires-Dist: prompt-toolkit>=3; extra == 'cli'
 Requires-Dist: pyperclip>=1.9.0; extra == 'cli'
 Requires-Dist: rich>=13; extra == 'cli'
 Provides-Extra: cohere
-Requires-Dist: cohere>=5.
+Requires-Dist: cohere>=5.17.0; (platform_system != 'Emscripten') and extra == 'cohere'
+Provides-Extra: dbos
+Requires-Dist: dbos>=1.13.0; extra == 'dbos'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==1.0.
+Requires-Dist: pydantic-evals==1.0.3; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq
@@ -67,7 +69,7 @@ Requires-Dist: logfire[httpx]>=3.14.1; extra == 'logfire'
 Provides-Extra: mcp
 Requires-Dist: mcp>=1.12.3; extra == 'mcp'
 Provides-Extra: mistral
-Requires-Dist: mistralai>=1.9.
+Requires-Dist: mistralai>=1.9.10; extra == 'mistral'
 Provides-Extra: openai
 Requires-Dist: openai>=1.99.9; extra == 'openai'
 Provides-Extra: retries
--- pydantic_ai_slim-1.0.1/pydantic_ai/_agent_graph.py
+++ pydantic_ai_slim-1.0.3/pydantic_ai/_agent_graph.py
@@ -2,6 +2,8 @@ from __future__ import annotations as _annotations

 import asyncio
 import dataclasses
+import inspect
+from asyncio import Task
 from collections import defaultdict, deque
 from collections.abc import AsyncIterator, Awaitable, Callable, Iterator, Sequence
 from contextlib import asynccontextmanager, contextmanager
@@ -740,7 +742,6 @@ async def process_function_tools(  # noqa: C901
     deferred_tool_results: dict[str, DeferredToolResult] = {}
     if build_run_context(ctx).tool_call_approved and ctx.deps.tool_call_results is not None:
         deferred_tool_results = ctx.deps.tool_call_results
-
     # Deferred tool calls are "run" as well, by reading their value from the tool call results
     calls_to_run.extend(tool_calls_by_kind['external'])
     calls_to_run.extend(tool_calls_by_kind['unapproved'])
@@ -819,7 +820,6 @@ async def _call_tools(
     for call in tool_calls:
         yield _messages.FunctionToolCallEvent(call)

-    # Run all tool tasks in parallel
     with tracer.start_as_current_span(
         'running tools',
         attributes={
@@ -827,39 +827,58 @@ async def _call_tools(
             'logfire.msg': f'running {len(tool_calls)} tool{"" if len(tool_calls) == 1 else "s"}',
         },
     ):
-        tasks = [
-            asyncio.create_task(
-                _call_tool(tool_manager, call, deferred_tool_results.get(call.tool_call_id), usage_limits),
-                name=call.tool_name,
-            )
-            for call in tool_calls
-        ]
-
-        pending = tasks
-        while pending:
-            done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
-            for task in done:
-                index = tasks.index(task)
-                try:
-                    tool_part, tool_user_part = task.result()
-                except exceptions.CallDeferred:
-                    deferred_calls_by_index[index] = 'external'
-                except exceptions.ApprovalRequired:
-                    deferred_calls_by_index[index] = 'unapproved'
-                else:
-                    yield _messages.FunctionToolResultEvent(tool_part)

-
-
-
+        async def handle_call_or_result(
+            coro_or_task: Awaitable[
+                tuple[_messages.ToolReturnPart | _messages.RetryPromptPart, _messages.UserPromptPart | None]
+            ]
+            | Task[tuple[_messages.ToolReturnPart | _messages.RetryPromptPart, _messages.UserPromptPart | None]],
+            index: int,
+        ) -> _messages.HandleResponseEvent | None:
+            try:
+                tool_part, tool_user_part = (
+                    (await coro_or_task) if inspect.isawaitable(coro_or_task) else coro_or_task.result()
+                )
+            except exceptions.CallDeferred:
+                deferred_calls_by_index[index] = 'external'
+            except exceptions.ApprovalRequired:
+                deferred_calls_by_index[index] = 'unapproved'
+            else:
+                tool_parts_by_index[index] = tool_part
+                if tool_user_part:
+                    user_parts_by_index[index] = tool_user_part
+
+                return _messages.FunctionToolResultEvent(tool_part)
+
+        if tool_manager.should_call_sequentially(tool_calls):
+            for index, call in enumerate(tool_calls):
+                if event := await handle_call_or_result(
+                    _call_tool(tool_manager, call, deferred_tool_results.get(call.tool_call_id), usage_limits),
+                    index,
+                ):
+                    yield event
+
+        else:
+            tasks = [
+                asyncio.create_task(
+                    _call_tool(tool_manager, call, deferred_tool_results.get(call.tool_call_id), usage_limits),
+                    name=call.tool_name,
+                )
+                for call in tool_calls
+            ]
+
+            pending = tasks
+            while pending:
+                done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
+                for task in done:
+                    index = tasks.index(task)
+                    if event := await handle_call_or_result(coro_or_task=task, index=index):
+                        yield event

     # We append the results at the end, rather than as they are received, to retain a consistent ordering
     # This is mostly just to simplify testing
-    for k in sorted(tool_parts_by_index)
-
-
-    for k in sorted(user_parts_by_index):
-        output_parts.append(user_parts_by_index[k])
+    output_parts.extend([tool_parts_by_index[k] for k in sorted(tool_parts_by_index)])
+    output_parts.extend([user_parts_by_index[k] for k in sorted(user_parts_by_index)])

     for k in sorted(deferred_calls_by_index):
         output_deferred_calls[deferred_calls_by_index[k]].append(tool_calls[k])
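
The `_call_tools` change above moves result handling into a shared `handle_call_or_result` helper so the same logic serves both a new sequential path and the existing `asyncio.wait` fan-out, while results are still collected by index to keep output ordering stable. A standalone sketch of that dispatch pattern (illustrative code, not taken from the package; `run_batch` and `work` are made-up names):

    import asyncio

    async def run_batch(coros, sequential: bool) -> list[object]:
        # Collect results keyed by the original index so the returned order is stable
        # regardless of completion order, mirroring the *_by_index dicts in the diff.
        results: dict[int, object] = {}
        if sequential:
            for index, coro in enumerate(coros):
                results[index] = await coro
        else:
            tasks = [asyncio.create_task(coro) for coro in coros]
            pending = set(tasks)
            while pending:
                done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
                for task in done:
                    results[tasks.index(task)] = task.result()
        return [results[k] for k in sorted(results)]

    async def main() -> None:
        async def work(n: int) -> int:
            await asyncio.sleep(0.01 * n)
            return n * n

        print(await run_batch([work(n) for n in (3, 1, 2)], sequential=False))  # [9, 1, 4]

    asyncio.run(main())
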
--- pydantic_ai_slim-1.0.1/pydantic_ai/_output.py
+++ pydantic_ai_slim-1.0.3/pydantic_ai/_output.py
@@ -7,7 +7,7 @@ from collections.abc import Awaitable, Callable, Sequence
 from dataclasses import dataclass, field
 from typing import TYPE_CHECKING, Any, Generic, Literal, cast, overload

-from pydantic import TypeAdapter, ValidationError
+from pydantic import Json, TypeAdapter, ValidationError
 from pydantic_core import SchemaValidator, to_json
 from typing_extensions import Self, TypedDict, TypeVar, assert_never

@@ -624,21 +624,33 @@ class ObjectOutputProcessor(BaseOutputProcessor[OutputDataT]):
             json_schema = self._function_schema.json_schema
             json_schema['description'] = self._function_schema.description
         else:
-
+            json_schema_type_adapter: TypeAdapter[Any]
+            validation_type_adapter: TypeAdapter[Any]
             if _utils.is_model_like(output):
-
+                json_schema_type_adapter = validation_type_adapter = TypeAdapter(output)
             else:
                 self.outer_typed_dict_key = 'response'
+                output_type: type[OutputDataT] = cast(type[OutputDataT], output)
+
                 response_data_typed_dict = TypedDict(  # noqa: UP013
                     'response_data_typed_dict',
-                    {'response':
+                    {'response': output_type},  # pyright: ignore[reportInvalidTypeForm]
+                )
+                json_schema_type_adapter = TypeAdapter(response_data_typed_dict)
+
+                # More lenient validator: allow either the native type or a JSON string containing it
+                # i.e. `response: OutputDataT | Json[OutputDataT]`, as some models don't follow the schema correctly,
+                # e.g. `BedrockConverseModel('us.meta.llama3-2-11b-instruct-v1:0')`
+                response_validation_typed_dict = TypedDict(  # noqa: UP013
+                    'response_validation_typed_dict',
+                    {'response': output_type | Json[output_type]},  # pyright: ignore[reportInvalidTypeForm]
                 )
-
+                validation_type_adapter = TypeAdapter(response_validation_typed_dict)

             # Really a PluggableSchemaValidator, but it's API-compatible
-            self.validator = cast(SchemaValidator,
+            self.validator = cast(SchemaValidator, validation_type_adapter.validator)
             json_schema = _utils.check_object_json_schema(
-
+                json_schema_type_adapter.json_schema(schema_generator=GenerateToolJsonSchema)
             )

         if self.outer_typed_dict_key:
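
The `Json[output_type]` union added above makes output validation accept a JSON-encoded string where a structured value was expected, which the diff's comment attributes to models that don't follow the schema exactly. A minimal sketch of the idea using plain Pydantic (`CityLocation`/`Response` are my own example types, not pydantic-ai code):

    from pydantic import Json, TypeAdapter
    from typing_extensions import TypedDict

    class CityLocation(TypedDict):
        city: str
        country: str

    class Response(TypedDict):
        # Accept either the native value or a JSON string that parses into the same shape.
        response: CityLocation | Json[CityLocation]

    adapter = TypeAdapter(Response)
    print(adapter.validate_python({'response': {'city': 'Paris', 'country': 'France'}}))
    print(adapter.validate_python({'response': '{"city": "Paris", "country": "France"}'}))

Both calls validate to the same dict: the second input is parsed via the `Json` branch of the union before being validated against the inner type.
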
--- pydantic_ai_slim-1.0.1/pydantic_ai/_parts_manager.py
+++ pydantic_ai_slim-1.0.3/pydantic_ai/_parts_manager.py
@@ -156,6 +156,7 @@ class ModelResponsePartsManager:
         content: str | None = None,
         id: str | None = None,
         signature: str | None = None,
+        provider_name: str | None = None,
     ) -> ModelResponseStreamEvent:
         """Handle incoming thinking content, creating or updating a ThinkingPart in the manager as appropriate.

@@ -170,6 +171,7 @@ class ModelResponsePartsManager:
             content: The thinking content to append to the appropriate ThinkingPart.
             id: An optional id for the thinking part.
             signature: An optional signature for the thinking content.
+            provider_name: An optional provider name for the thinking part.

         Returns:
             A `PartStartEvent` if a new part was created, or a `PartDeltaEvent` if an existing part was updated.
@@ -199,7 +201,7 @@ class ModelResponsePartsManager:
             if content is not None:
                 # There is no existing thinking part that should be updated, so create a new one
                 new_part_index = len(self._parts)
-                part = ThinkingPart(content=content, id=id, signature=signature)
+                part = ThinkingPart(content=content, id=id, signature=signature, provider_name=provider_name)
                 if vendor_part_id is not None:  # pragma: no branch
                     self._vendor_id_to_part_index[vendor_part_id] = new_part_index
                 self._parts.append(part)
@@ -207,16 +209,12 @@ class ModelResponsePartsManager:
             else:
                 raise UnexpectedModelBehavior('Cannot create a ThinkingPart with no content')
         else:
-            if content is not None:
-                # Update the existing ThinkingPart with the new content delta
-                existing_thinking_part, part_index = existing_thinking_part_and_index
-                part_delta = ThinkingPartDelta(content_delta=content)
-                self._parts[part_index] = part_delta.apply(existing_thinking_part)
-                return PartDeltaEvent(index=part_index, delta=part_delta)
-            elif signature is not None:
-                # Update the existing ThinkingPart with the new signature delta
+            if content is not None or signature is not None:
+                # Update the existing ThinkingPart with the new content and/or signature delta
                 existing_thinking_part, part_index = existing_thinking_part_and_index
-                part_delta = ThinkingPartDelta(
+                part_delta = ThinkingPartDelta(
+                    content_delta=content, signature_delta=signature, provider_name=provider_name
+                )
                 self._parts[part_index] = part_delta.apply(existing_thinking_part)
                 return PartDeltaEvent(index=part_index, delta=part_delta)
             else:
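
A small sketch of what the `provider_name` threading above enables at the message level, assuming the 1.0.3 `ThinkingPart`/`ThinkingPartDelta` fields shown in this diff (the values below are made up):

    from pydantic_ai.messages import ThinkingPart, ThinkingPartDelta

    # A streamed thinking part now records which provider produced it.
    part = ThinkingPart(content='Let me think', provider_name='anthropic')
    delta = ThinkingPartDelta(content_delta=' about this.', provider_name='anthropic')
    # apply() merges the delta into the existing part, as the parts manager does above.
    print(delta.apply(part))
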
--- pydantic_ai_slim-1.0.1/pydantic_ai/_tool_manager.py
+++ pydantic_ai_slim-1.0.3/pydantic_ai/_tool_manager.py
@@ -1,6 +1,9 @@
 from __future__ import annotations

 import json
+from collections.abc import Iterator
+from contextlib import contextmanager
+from contextvars import ContextVar
 from dataclasses import dataclass, field, replace
 from typing import Any, Generic

@@ -16,6 +19,8 @@ from .tools import ToolDefinition
 from .toolsets.abstract import AbstractToolset, ToolsetTool
 from .usage import UsageLimits

+_sequential_tool_calls_ctx_var: ContextVar[bool] = ContextVar('sequential_tool_calls', default=False)
+

 @dataclass
 class ToolManager(Generic[AgentDepsT]):
@@ -30,6 +35,16 @@ class ToolManager(Generic[AgentDepsT]):
     failed_tools: set[str] = field(default_factory=set)
     """Names of tools that failed in this run step."""

+    @classmethod
+    @contextmanager
+    def sequential_tool_calls(cls) -> Iterator[None]:
+        """Run tool calls sequentially during the context."""
+        token = _sequential_tool_calls_ctx_var.set(True)
+        try:
+            yield
+        finally:
+            _sequential_tool_calls_ctx_var.reset(token)
+
     async def for_run_step(self, ctx: RunContext[AgentDepsT]) -> ToolManager[AgentDepsT]:
         """Build a new tool manager for the next run step, carrying over the retries from the current run step."""
         if self.ctx is not None:
@@ -56,6 +71,12 @@ class ToolManager(Generic[AgentDepsT]):

         return [tool.tool_def for tool in self.tools.values()]

+    def should_call_sequentially(self, calls: list[ToolCallPart]) -> bool:
+        """Whether to require sequential tool calls for a list of tool calls."""
+        return _sequential_tool_calls_ctx_var.get() or any(
+            tool_def.sequential for call in calls if (tool_def := self.get_tool_def(call.tool_name))
+        )
+
     def get_tool_def(self, name: str) -> ToolDefinition | None:
         """Get the tool definition for a given tool name, or `None` if the tool is unknown."""
        if self.tools is None:
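
The flag is stored in a `ContextVar`, so it is scoped to the current (async) context rather than being a process-wide global. A standalone sketch of that behaviour (illustrative names, not pydantic-ai code):

    import asyncio
    from contextlib import contextmanager
    from contextvars import ContextVar

    _sequential = ContextVar('sequential', default=False)

    @contextmanager
    def sequential_tool_calls():
        token = _sequential.set(True)
        try:
            yield
        finally:
            _sequential.reset(token)

    async def report(name: str) -> None:
        await asyncio.sleep(0)
        print(name, _sequential.get())

    async def main() -> None:
        with sequential_tool_calls():
            inside = asyncio.create_task(report('inside'))   # task inherits the flag: True
            await inside
        outside = asyncio.create_task(report('outside'))     # created after reset: False
        await outside

    asyncio.run(main())
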
--- pydantic_ai_slim-1.0.1/pydantic_ai/ag_ui.py
+++ pydantic_ai_slim-1.0.3/pydantic_ai/ag_ui.py
@@ -8,7 +8,7 @@ from __future__ import annotations

 import json
 import uuid
-from collections.abc import AsyncIterator, Callable, Iterable, Mapping, Sequence
+from collections.abc import AsyncIterator, Awaitable, Callable, Iterable, Mapping, Sequence
 from dataclasses import Field, dataclass, replace
 from http import HTTPStatus
 from typing import (
@@ -17,14 +17,16 @@ from typing import (
     Final,
     Generic,
     Protocol,
+    TypeAlias,
     TypeVar,
     runtime_checkable,
 )

 from pydantic import BaseModel, ValidationError

+from . import _utils
 from ._agent_graph import CallToolsNode, ModelRequestNode
-from .agent import AbstractAgent, AgentRun
+from .agent import AbstractAgent, AgentRun, AgentRunResult
 from .exceptions import UserError
 from .messages import (
     FunctionToolResultEvent,
@@ -68,9 +70,8 @@ try:
         TextMessageContentEvent,
         TextMessageEndEvent,
         TextMessageStartEvent,
-
-
-        # ThinkingStartEvent,
+        ThinkingEndEvent,
+        ThinkingStartEvent,
         ThinkingTextMessageContentEvent,
         ThinkingTextMessageEndEvent,
         ThinkingTextMessageStartEvent,
@@ -108,6 +109,7 @@ __all__ = [
     'StateDeps',
     'StateHandler',
     'AGUIApp',
+    'OnCompleteFunc',
     'handle_ag_ui_request',
     'run_ag_ui',
 ]
@@ -115,6 +117,9 @@ __all__ = [
 SSE_CONTENT_TYPE: Final[str] = 'text/event-stream'
 """Content type header value for Server-Sent Events (SSE)."""

+OnCompleteFunc: TypeAlias = Callable[[AgentRunResult[Any]], None] | Callable[[AgentRunResult[Any]], Awaitable[None]]
+"""Callback function type that receives the `AgentRunResult` of the completed run. Can be sync or async."""
+

 class AGUIApp(Generic[AgentDepsT, OutputDataT], Starlette):
     """ASGI application for running Pydantic AI agents with AG-UI protocol support."""
@@ -221,6 +226,7 @@ async def handle_ag_ui_request(
     usage: RunUsage | None = None,
     infer_name: bool = True,
     toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+    on_complete: OnCompleteFunc | None = None,
 ) -> Response:
     """Handle an AG-UI request by running the agent and returning a streaming response.

@@ -237,6 +243,8 @@ async def handle_ag_ui_request(
         usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
         infer_name: Whether to try to infer the agent name from the call frame if it's not set.
         toolsets: Optional additional toolsets for this run.
+        on_complete: Optional callback function called when the agent run completes successfully.
+            The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data.

     Returns:
         A streaming Starlette response with AG-UI protocol events.
@@ -264,6 +272,7 @@ async def handle_ag_ui_request(
             usage=usage,
             infer_name=infer_name,
             toolsets=toolsets,
+            on_complete=on_complete,
         ),
         media_type=accept,
     )
@@ -282,6 +291,7 @@ async def run_ag_ui(
     usage: RunUsage | None = None,
     infer_name: bool = True,
     toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+    on_complete: OnCompleteFunc | None = None,
 ) -> AsyncIterator[str]:
     """Run the agent with the AG-UI run input and stream AG-UI protocol events.

@@ -299,6 +309,8 @@ async def run_ag_ui(
         usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
         infer_name: Whether to try to infer the agent name from the call frame if it's not set.
         toolsets: Optional additional toolsets for this run.
+        on_complete: Optional callback function called when the agent run completes successfully.
+            The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data.

     Yields:
         Streaming event chunks encoded as strings according to the accept header value.
@@ -357,6 +369,12 @@ async def run_ag_ui(
         ) as run:
             async for event in _agent_stream(run):
                 yield encoder.encode(event)
+
+            if on_complete is not None and run.result is not None:
+                if _utils.is_async_callable(on_complete):
+                    await on_complete(run.result)
+                else:
+                    await _utils.run_in_executor(on_complete, run.result)
     except _RunError as e:
         yield encoder.encode(
             RunErrorEvent(message=e.message, code=e.code),
@@ -396,10 +414,9 @@ async def _agent_stream(run: AgentRun[AgentDepsT, Any]) -> AsyncIterator[BaseEvent]:
                     yield stream_ctx.part_end
                     stream_ctx.part_end = None
                 if stream_ctx.thinking:
-
-
-
-                    # )
+                    yield ThinkingEndEvent(
+                        type=EventType.THINKING_END,
+                    )
                     stream_ctx.thinking = False
         elif isinstance(node, CallToolsNode):
             async with node.stream(run.ctx) as handle_stream:
@@ -431,10 +448,9 @@ async def _handle_model_request_event(  # noqa: C901
         part = agent_event.part
         if isinstance(part, ThinkingPart):  # pragma: no branch
             if not stream_ctx.thinking:
-
-
-
-                # )
+                yield ThinkingStartEvent(
+                    type=EventType.THINKING_START,
+                )
                 stream_ctx.thinking = True

             if part.content:
@@ -450,10 +466,9 @@ async def _handle_model_request_event(  # noqa: C901
                 )
         else:
             if stream_ctx.thinking:
-
-
-
-                # )
+                yield ThinkingEndEvent(
+                    type=EventType.THINKING_END,
+                )
                 stream_ctx.thinking = False

             if isinstance(part, TextPart):
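
A hedged usage sketch for the new `on_complete` parameter: per the diff, sync callbacks are run in an executor and async ones are awaited after the run finishes. The Starlette wiring, agent, model name, and callback below are illustrative, not taken from this package:

    from pydantic_ai import Agent
    from pydantic_ai.ag_ui import handle_ag_ui_request
    from starlette.applications import Starlette
    from starlette.requests import Request
    from starlette.responses import Response
    from starlette.routing import Route

    agent = Agent('openai:gpt-4o')

    def log_run(result) -> None:
        # Receives the completed AgentRunResult once the AG-UI run finishes successfully.
        print(f'run finished with {len(result.all_messages())} messages')

    async def agent_endpoint(request: Request) -> Response:
        return await handle_ag_ui_request(agent, request, on_complete=log_run)

    app = Starlette(routes=[Route('/agent', agent_endpoint, methods=['POST'])])
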
--- pydantic_ai_slim-1.0.1/pydantic_ai/agent/__init__.py
+++ pydantic_ai_slim-1.0.3/pydantic_ai/agent/__init__.py
@@ -1119,6 +1119,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
         require_parameter_descriptions: bool = False,
         schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema,
         strict: bool | None = None,
+        sequential: bool = False,
         requires_approval: bool = False,
     ) -> Any:
         """Decorator to register a tool function which DOES NOT take `RunContext` as an argument.
@@ -1164,6 +1165,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
             schema_generator: The JSON schema generator class to use for this tool. Defaults to `GenerateToolJsonSchema`.
             strict: Whether to enforce JSON schema compliance (only affects OpenAI).
                 See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
+            sequential: Whether the function requires a sequential/serial execution environment. Defaults to False.
            requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False.
                 See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info.
         """
@@ -1180,6 +1182,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
                 require_parameter_descriptions,
                 schema_generator,
                 strict,
+                sequential,
                 requires_approval,
             )
             return func_
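
Usage sketch for the new per-tool `sequential` option (toy tools and `TestModel` so it runs offline; not code from this package):

    from pydantic_ai import Agent
    from pydantic_ai.models.test import TestModel

    agent = Agent(TestModel())

    @agent.tool_plain(sequential=True)  # never run this tool concurrently with other tool calls
    def write_row(row: str) -> str:
        return f'wrote {row}'

    @agent.tool_plain
    def read_row(key: str) -> str:
        return f'value for {key}'

    result = agent.run_sync('write a row, then read it back')
    print(result.output)
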
--- pydantic_ai_slim-1.0.1/pydantic_ai/agent/abstract.py
+++ pydantic_ai_slim-1.0.3/pydantic_ai/agent/abstract.py
@@ -21,6 +21,7 @@ from .. import (
     result,
     usage as _usage,
 )
+from .._tool_manager import ToolManager
 from ..output import OutputDataT, OutputSpec
 from ..result import AgentStream, FinalResult, StreamedRunResult
 from ..run import AgentRun, AgentRunResult
@@ -714,6 +715,13 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
             self.name = name
             return

+    @staticmethod
+    @contextmanager
+    def sequential_tool_calls() -> Iterator[None]:
+        """Run tool calls sequentially during the context."""
+        with ToolManager.sequential_tool_calls():
+            yield
+
     @staticmethod
     def is_model_request_node(
         node: _agent_graph.AgentNode[T, S] | End[result.FinalResult[S]],
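
And a matching usage sketch for the agent-level context manager added here (again toy code with `TestModel`, not taken from this package):

    from pydantic_ai import Agent
    from pydantic_ai.models.test import TestModel

    agent = Agent(TestModel())

    @agent.tool_plain
    def fetch(url: str) -> str:
        return f'contents of {url}'

    @agent.tool_plain
    def summarize(text: str) -> str:
        return f'summary of {text}'

    # Force all tool calls in runs inside this block to execute one at a time,
    # regardless of each tool's own `sequential` setting.
    with Agent.sequential_tool_calls():
        result = agent.run_sync('fetch a page and summarize it')
    print(result.output)
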
|