pydantic-ai-slim 1.0.12__tar.gz → 1.0.13__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/PKG-INFO +3 -3
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/_agent_graph.py +4 -0
- pydantic_ai_slim-1.0.13/pydantic_ai/_instrumentation.py +95 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/_output.py +26 -12
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/_run_context.py +4 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/_tool_manager.py +15 -7
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/agent/__init__.py +67 -34
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/agent/abstract.py +12 -1
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/agent/wrapper.py +11 -3
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/direct.py +2 -2
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/durable_exec/dbos/_agent.py +11 -2
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/durable_exec/temporal/_agent.py +12 -3
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/mcp.py +12 -2
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/models/__init__.py +18 -1
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/models/anthropic.py +8 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/models/bedrock.py +8 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/models/cohere.py +4 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/models/fallback.py +2 -9
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/models/function.py +8 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/models/gemini.py +8 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/models/google.py +12 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/models/groq.py +8 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/models/huggingface.py +8 -2
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/models/instrumented.py +16 -6
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/models/mcp_sampling.py +2 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/models/mistral.py +8 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/models/openai.py +16 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/models/test.py +8 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/models/wrapper.py +7 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/.gitignore +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/LICENSE +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/README.md +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/__main__.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/_a2a.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/_cli.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/_function_schema.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/_json_schema.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/_mcp.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/_otel_messages.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/_parts_manager.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/_thinking_part.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/_utils.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/ag_ui.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/builtin_tools.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/durable_exec/__init__.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/durable_exec/dbos/__init__.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/durable_exec/dbos/_mcp_server.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/durable_exec/dbos/_model.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/durable_exec/dbos/_utils.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/durable_exec/temporal/__init__.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/durable_exec/temporal/_function_toolset.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/durable_exec/temporal/_logfire.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/durable_exec/temporal/_mcp_server.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/durable_exec/temporal/_model.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/durable_exec/temporal/_run_context.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/durable_exec/temporal/_toolset.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/ext/__init__.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/ext/aci.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/ext/langchain.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/format_prompt.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/messages.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/output.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/profiles/__init__.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/profiles/amazon.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/profiles/anthropic.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/profiles/cohere.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/profiles/deepseek.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/profiles/google.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/profiles/grok.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/profiles/groq.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/profiles/harmony.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/profiles/meta.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/profiles/mistral.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/profiles/moonshotai.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/profiles/openai.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/profiles/qwen.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/__init__.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/anthropic.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/azure.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/cerebras.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/cohere.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/fireworks.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/gateway.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/github.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/google.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/grok.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/groq.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/heroku.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/huggingface.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/litellm.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/mistral.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/moonshotai.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/ollama.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/openai.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/openrouter.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/together.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/providers/vercel.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/result.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/retries.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/run.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/tools.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/toolsets/__init__.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/toolsets/_dynamic.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/toolsets/abstract.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/toolsets/approval_required.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/toolsets/combined.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/toolsets/external.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/toolsets/filtered.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/toolsets/function.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/toolsets/prefixed.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/toolsets/prepared.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/toolsets/renamed.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/toolsets/wrapper.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pydantic_ai/usage.py +0 -0
- {pydantic_ai_slim-1.0.12 → pydantic_ai_slim-1.0.13}/pyproject.toml +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 1.0.12
+Version: 1.0.13
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -33,7 +33,7 @@ Requires-Dist: genai-prices>=0.0.28
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==1.0.12
+Requires-Dist: pydantic-graph==1.0.13
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -57,7 +57,7 @@ Requires-Dist: dbos>=1.14.0; extra == 'dbos'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==1.0.12; extra == 'evals'
+Requires-Dist: pydantic-evals==1.0.13; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq
pydantic_ai/_agent_graph.py

@@ -16,6 +16,7 @@ from opentelemetry.trace import Tracer
 from typing_extensions import TypeVar, assert_never

 from pydantic_ai._function_schema import _takes_ctx as is_takes_ctx  # type: ignore
+from pydantic_ai._instrumentation import DEFAULT_INSTRUMENTATION_VERSION
 from pydantic_ai._tool_manager import ToolManager
 from pydantic_ai._utils import dataclasses_no_defaults_repr, get_union_args, is_async_callable, run_in_executor
 from pydantic_ai.builtin_tools import AbstractBuiltinTool
@@ -704,6 +705,9 @@ def build_run_context(ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT
         tracer=ctx.deps.tracer,
         trace_include_content=ctx.deps.instrumentation_settings is not None
         and ctx.deps.instrumentation_settings.include_content,
+        instrumentation_version=ctx.deps.instrumentation_settings.version
+        if ctx.deps.instrumentation_settings
+        else DEFAULT_INSTRUMENTATION_VERSION,
         run_step=ctx.state.run_step,
         tool_call_approved=ctx.state.run_step == 0,
     )
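The only behavioural change here is how `build_run_context` picks the instrumentation version. A minimal sketch of that selection, assuming pydantic-ai-slim 1.0.13 is installed (the `InstrumentationSettings` import path and the no-argument constructor are assumptions, not shown in this diff):

```python
from pydantic_ai._instrumentation import DEFAULT_INSTRUMENTATION_VERSION
from pydantic_ai.models.instrumented import InstrumentationSettings

# When instrumentation is configured, the run context gets its version;
# otherwise it falls back to the package default.
settings: InstrumentationSettings | None = InstrumentationSettings()
version = settings.version if settings is not None else DEFAULT_INSTRUMENTATION_VERSION
print(version)
```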
pydantic_ai/_instrumentation.py (new file)

@@ -0,0 +1,95 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing_extensions import Self
+
+DEFAULT_INSTRUMENTATION_VERSION = 2
+"""Default instrumentation version for `InstrumentationSettings`."""
+
+
+@dataclass(frozen=True)
+class InstrumentationNames:
+    """Configuration for instrumentation span names and attributes based on version."""
+
+    # Agent run span configuration
+    agent_run_span_name: str
+    agent_name_attr: str
+
+    # Tool execution span configuration
+    tool_span_name: str
+    tool_arguments_attr: str
+    tool_result_attr: str
+
+    # Output Tool execution span configuration
+    output_tool_span_name: str
+
+    @classmethod
+    def for_version(cls, version: int) -> Self:
+        """Create instrumentation configuration for a specific version.
+
+        Args:
+            version: The instrumentation version (1, 2, or 3+)
+
+        Returns:
+            InstrumentationConfig instance with version-appropriate settings
+        """
+        if version <= 2:
+            return cls(
+                agent_run_span_name='agent run',
+                agent_name_attr='agent_name',
+                tool_span_name='running tool',
+                tool_arguments_attr='tool_arguments',
+                tool_result_attr='tool_response',
+                output_tool_span_name='running output function',
+            )
+        else:
+            return cls(
+                agent_run_span_name='invoke_agent',
+                agent_name_attr='gen_ai.agent.name',
+                tool_span_name='execute_tool',  # Will be formatted with tool name
+                tool_arguments_attr='gen_ai.tool.call.arguments',
+                tool_result_attr='gen_ai.tool.call.result',
+                output_tool_span_name='execute_tool',
+            )
+
+    def get_agent_run_span_name(self, agent_name: str) -> str:
+        """Get the formatted agent span name.
+
+        Args:
+            agent_name: Name of the agent being executed
+
+        Returns:
+            Formatted span name
+        """
+        if self.agent_run_span_name == 'invoke_agent':
+            return f'invoke_agent {agent_name}'
+        return self.agent_run_span_name
+
+    def get_tool_span_name(self, tool_name: str) -> str:
+        """Get the formatted tool span name.
+
+        Args:
+            tool_name: Name of the tool being executed
+
+        Returns:
+            Formatted span name
+        """
+        if self.tool_span_name == 'execute_tool':
+            return f'execute_tool {tool_name}'
+        return self.tool_span_name
+
+    def get_output_tool_span_name(self, tool_name: str) -> str:
+        """Get the formatted output tool span name.
+
+        Args:
+            tool_name: Name of the tool being executed
+
+        Returns:
+            Formatted span name
+        """
+        if self.output_tool_span_name == 'execute_tool':
+            return f'execute_tool {tool_name}'
+        return self.output_tool_span_name
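Since this module is purely additive, its effect is easiest to see by calling it directly. A small sketch, assuming pydantic-ai-slim 1.0.13 is installed (the private `pydantic_ai._instrumentation` import path mirrors the diff above):

```python
from pydantic_ai._instrumentation import InstrumentationNames

legacy = InstrumentationNames.for_version(2)   # current default naming
semconv = InstrumentationNames.for_version(3)  # OpenTelemetry GenAI-style naming

print(legacy.get_agent_run_span_name('support'))   # 'agent run'
print(semconv.get_agent_run_span_name('support'))  # 'invoke_agent support'

print(legacy.get_tool_span_name('get_weather'))    # 'running tool'
print(semconv.get_tool_span_name('get_weather'))   # 'execute_tool get_weather'
```

The frozen dataclass keeps all version-dependent span names and attribute keys in one place instead of scattering string literals across `_output.py`, `_tool_manager.py`, and `agent/__init__.py`.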
pydantic_ai/_output.py

@@ -11,6 +11,8 @@ from pydantic import Json, TypeAdapter, ValidationError
 from pydantic_core import SchemaValidator, to_json
 from typing_extensions import Self, TypedDict, TypeVar, assert_never

+from pydantic_ai._instrumentation import InstrumentationNames
+
 from . import _function_schema, _utils, messages as _messages
 from ._run_context import AgentDepsT, RunContext
 from .exceptions import ModelRetry, ToolRetryError, UserError
@@ -95,6 +97,7 @@ async def execute_traced_output_function(
         ToolRetryError: When wrap_validation_errors is True and a ModelRetry is caught
         ModelRetry: When wrap_validation_errors is False and a ModelRetry occurs
     """
+    instrumentation_names = InstrumentationNames.for_version(run_context.instrumentation_version)
     # Set up span attributes
     tool_name = run_context.tool_name or getattr(function_schema.function, '__name__', 'output_function')
     attributes = {
@@ -104,18 +107,29 @@ async def execute_traced_output_function(
     if run_context.tool_call_id:
         attributes['gen_ai.tool.call.id'] = run_context.tool_call_id
     if run_context.trace_include_content:
-        attributes[
-
-
-
-
-
-
-
-
-
+        attributes[instrumentation_names.tool_arguments_attr] = to_json(args).decode()
+
+    attributes['logfire.json_schema'] = json.dumps(
+        {
+            'type': 'object',
+            'properties': {
+                **(
+                    {
+                        instrumentation_names.tool_arguments_attr: {'type': 'object'},
+                        instrumentation_names.tool_result_attr: {'type': 'object'},
+                    }
+                    if run_context.trace_include_content
+                    else {}
+                ),
+                'gen_ai.tool.name': {},
+                **({'gen_ai.tool.call.id': {}} if run_context.tool_call_id else {}),
+            },
+        }
+    )

-    with run_context.tracer.start_as_current_span(
+    with run_context.tracer.start_as_current_span(
+        instrumentation_names.get_output_tool_span_name(tool_name), attributes=attributes
+    ) as span:
         try:
             output = await function_schema.call(args, run_context)
         except ModelRetry as r:
@@ -135,7 +149,7 @@ async def execute_traced_output_function(
         from .models.instrumented import InstrumentedModel

         span.set_attribute(
-
+            instrumentation_names.tool_result_attr,
             output if isinstance(output, str) else json.dumps(InstrumentedModel.serialize_any(output)),
         )
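The attribute keys on the output-function span now come from `InstrumentationNames` rather than hard-coded strings. A rough sketch of the attribute payload the new code builds for a version-2 run (the tool name and arguments below are invented for illustration):

```python
import json
from pydantic_core import to_json
from pydantic_ai._instrumentation import InstrumentationNames

names = InstrumentationNames.for_version(2)
args = {'city': 'London'}   # hypothetical output-function arguments
include_content = True      # mirrors run_context.trace_include_content

attributes = {'gen_ai.tool.name': 'final_result'}
if include_content:
    attributes[names.tool_arguments_attr] = to_json(args).decode()
attributes['logfire.json_schema'] = json.dumps({
    'type': 'object',
    'properties': {
        **(
            {names.tool_arguments_attr: {'type': 'object'}, names.tool_result_attr: {'type': 'object'}}
            if include_content
            else {}
        ),
        'gen_ai.tool.name': {},
    },
})
print(attributes)  # version 2 uses 'tool_arguments' / 'tool_response' keys
```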
pydantic_ai/_run_context.py

@@ -8,6 +8,8 @@ from typing import TYPE_CHECKING, Generic
 from opentelemetry.trace import NoOpTracer, Tracer
 from typing_extensions import TypeVar

+from pydantic_ai._instrumentation import DEFAULT_INSTRUMENTATION_VERSION
+
 from . import _utils, messages as _messages

 if TYPE_CHECKING:
@@ -36,6 +38,8 @@ class RunContext(Generic[AgentDepsT]):
     """The tracer to use for tracing the run."""
     trace_include_content: bool = False
     """Whether to include the content of the messages in the trace."""
+    instrumentation_version: int = DEFAULT_INSTRUMENTATION_VERSION
+    """Instrumentation settings version, if instrumentation is enabled."""
     retries: dict[str, int] = field(default_factory=dict)
     """Number of retries for each tool so far."""
     tool_call_id: str | None = None
pydantic_ai/_tool_manager.py

@@ -12,6 +12,7 @@ from pydantic import ValidationError
 from typing_extensions import assert_never

 from . import messages as _messages
+from ._instrumentation import InstrumentationNames
 from ._run_context import AgentDepsT, RunContext
 from .exceptions import ModelRetry, ToolRetryError, UnexpectedModelBehavior
 from .messages import ToolCallPart
@@ -115,6 +116,7 @@ class ToolManager(Generic[AgentDepsT]):
             wrap_validation_errors,
             self.ctx.tracer,
             self.ctx.trace_include_content,
+            self.ctx.instrumentation_version,
             usage_limits,
         )

@@ -203,15 +205,18 @@ class ToolManager(Generic[AgentDepsT]):
         allow_partial: bool,
         wrap_validation_errors: bool,
         tracer: Tracer,
-        include_content: bool
+        include_content: bool,
+        instrumentation_version: int,
         usage_limits: UsageLimits | None = None,
     ) -> Any:
         """See <https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#execute-tool-span>."""
+        instrumentation_names = InstrumentationNames.for_version(instrumentation_version)
+
         span_attributes = {
             'gen_ai.tool.name': call.tool_name,
             # NOTE: this means `gen_ai.tool.call.id` will be included even if it was generated by pydantic-ai
             'gen_ai.tool.call.id': call.tool_call_id,
-            **({
+            **({instrumentation_names.tool_arguments_attr: call.args_as_json_str()} if include_content else {}),
             'logfire.msg': f'running tool: {call.tool_name}',
             # add the JSON schema so these attributes are formatted nicely in Logfire
             'logfire.json_schema': json.dumps(
@@ -220,8 +225,8 @@ class ToolManager(Generic[AgentDepsT]):
                     'properties': {
                         **(
                             {
-
-
+                                instrumentation_names.tool_arguments_attr: {'type': 'object'},
+                                instrumentation_names.tool_result_attr: {'type': 'object'},
                             }
                             if include_content
                             else {}
@@ -232,18 +237,21 @@ class ToolManager(Generic[AgentDepsT]):
                 }
             ),
         }
-        with tracer.start_as_current_span(
+        with tracer.start_as_current_span(
+            instrumentation_names.get_tool_span_name(call.tool_name),
+            attributes=span_attributes,
+        ) as span:
             try:
                 tool_result = await self._call_tool(call, allow_partial, wrap_validation_errors, usage_limits)
             except ToolRetryError as e:
                 part = e.tool_retry
                 if include_content and span.is_recording():
-                    span.set_attribute(
+                    span.set_attribute(instrumentation_names.tool_result_attr, part.model_response())
                 raise e

            if include_content and span.is_recording():
                span.set_attribute(
-
+                    instrumentation_names.tool_result_attr,
                     tool_result
                     if isinstance(tool_result, str)
                     else _messages.tool_return_ta.dump_json(tool_result).decode(),
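For tool spans only the version-dependent pieces change; `gen_ai.tool.name` and `gen_ai.tool.call.id` stay the same either way. A short sketch of which span name and attribute keys a traced tool call would use under each version (assuming 1.0.13; the tool name is arbitrary):

```python
from pydantic_ai._instrumentation import InstrumentationNames

for version in (2, 3):
    names = InstrumentationNames.for_version(version)
    print(version, names.get_tool_span_name('roll_dice'), names.tool_arguments_attr, names.tool_result_attr)

# Expected, per the for_version() table above:
# 2 running tool tool_arguments tool_response
# 3 execute_tool roll_dice gen_ai.tool.call.arguments gen_ai.tool.call.result
```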
pydantic_ai/agent/__init__.py

@@ -14,6 +14,7 @@ from opentelemetry.trace import NoOpTracer, use_span
 from pydantic.json_schema import GenerateJsonSchema
 from typing_extensions import Self, TypeVar, deprecated

+from pydantic_ai._instrumentation import DEFAULT_INSTRUMENTATION_VERSION, InstrumentationNames
 from pydantic_graph import Graph

 from .. import (
@@ -65,7 +66,7 @@ from ..toolsets._dynamic import (
 from ..toolsets.combined import CombinedToolset
 from ..toolsets.function import FunctionToolset
 from ..toolsets.prepared import PreparedToolset
-from .abstract import AbstractAgent, EventStreamHandler, RunOutputDataT
+from .abstract import AbstractAgent, EventStreamHandler, Instructions, RunOutputDataT
 from .wrapper import WrapperAgent

 if TYPE_CHECKING:
@@ -136,8 +137,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
     _deps_type: type[AgentDepsT] = dataclasses.field(repr=False)
     _output_schema: _output.BaseOutputSchema[OutputDataT] = dataclasses.field(repr=False)
     _output_validators: list[_output.OutputValidator[AgentDepsT, OutputDataT]] = dataclasses.field(repr=False)
-    _instructions: str |
-    _instructions_functions: list[_system_prompt.SystemPromptRunner[AgentDepsT]] = dataclasses.field(repr=False)
+    _instructions: list[str | _system_prompt.SystemPromptFunc[AgentDepsT]] = dataclasses.field(repr=False)
     _system_prompts: tuple[str, ...] = dataclasses.field(repr=False)
     _system_prompt_functions: list[_system_prompt.SystemPromptRunner[AgentDepsT]] = dataclasses.field(repr=False)
     _system_prompt_dynamic_functions: dict[str, _system_prompt.SystemPromptRunner[AgentDepsT]] = dataclasses.field(
@@ -163,10 +163,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
         model: models.Model | models.KnownModelName | str | None = None,
         *,
         output_type: OutputSpec[OutputDataT] = str,
-        instructions:
-        | _system_prompt.SystemPromptFunc[AgentDepsT]
-        | Sequence[str | _system_prompt.SystemPromptFunc[AgentDepsT]]
-        | None = None,
+        instructions: Instructions[AgentDepsT] = None,
         system_prompt: str | Sequence[str] = (),
         deps_type: type[AgentDepsT] = NoneType,
         name: str | None = None,
@@ -192,10 +189,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
         model: models.Model | models.KnownModelName | str | None = None,
         *,
         output_type: OutputSpec[OutputDataT] = str,
-        instructions:
-        | _system_prompt.SystemPromptFunc[AgentDepsT]
-        | Sequence[str | _system_prompt.SystemPromptFunc[AgentDepsT]]
-        | None = None,
+        instructions: Instructions[AgentDepsT] = None,
         system_prompt: str | Sequence[str] = (),
         deps_type: type[AgentDepsT] = NoneType,
         name: str | None = None,
@@ -219,10 +213,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
         model: models.Model | models.KnownModelName | str | None = None,
         *,
         output_type: OutputSpec[OutputDataT] = str,
-        instructions:
-        | _system_prompt.SystemPromptFunc[AgentDepsT]
-        | Sequence[str | _system_prompt.SystemPromptFunc[AgentDepsT]]
-        | None = None,
+        instructions: Instructions[AgentDepsT] = None,
         system_prompt: str | Sequence[str] = (),
         deps_type: type[AgentDepsT] = NoneType,
         name: str | None = None,
@@ -321,16 +312,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
         self._output_schema = _output.OutputSchema[OutputDataT].build(output_type, default_mode=default_output_mode)
         self._output_validators = []

-        self._instructions =
-        self._instructions_functions = []
-        if isinstance(instructions, str | Callable):
-            instructions = [instructions]
-        for instruction in instructions or []:
-            if isinstance(instruction, str):
-                self._instructions += instruction + '\n'
-            else:
-                self._instructions_functions.append(_system_prompt.SystemPromptRunner(instruction))
-        self._instructions = self._instructions.strip() or None
+        self._instructions = self._normalize_instructions(instructions)

         self._system_prompts = (system_prompt,) if isinstance(system_prompt, str) else tuple(system_prompt)
         self._system_prompt_functions = []
@@ -370,6 +352,9 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
         self._override_tools: ContextVar[
             _utils.Option[Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]]]
         ] = ContextVar('_override_tools', default=None)
+        self._override_instructions: ContextVar[
+            _utils.Option[list[str | _system_prompt.SystemPromptFunc[AgentDepsT]]]
+        ] = ContextVar('_override_instructions', default=None)

         self._enter_lock = Lock()
         self._entered_count = 0
@@ -592,10 +577,12 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
         model_settings = merge_model_settings(merged_settings, model_settings)
         usage_limits = usage_limits or _usage.UsageLimits()

+        instructions_literal, instructions_functions = self._get_instructions()
+
         async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None:
             parts = [
-
-                *[await func.run(run_context) for func in
+                instructions_literal,
+                *[await func.run(run_context) for func in instructions_functions],
             ]

             model_profile = model_used.profile
@@ -633,22 +620,28 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
             get_instructions=get_instructions,
             instrumentation_settings=instrumentation_settings,
         )
+
         start_node = _agent_graph.UserPromptNode[AgentDepsT](
             user_prompt=user_prompt,
             deferred_tool_results=deferred_tool_results,
-            instructions=
-            instructions_functions=
+            instructions=instructions_literal,
+            instructions_functions=instructions_functions,
             system_prompts=self._system_prompts,
             system_prompt_functions=self._system_prompt_functions,
             system_prompt_dynamic_functions=self._system_prompt_dynamic_functions,
         )

         agent_name = self.name or 'agent'
+        instrumentation_names = InstrumentationNames.for_version(
+            instrumentation_settings.version if instrumentation_settings else DEFAULT_INSTRUMENTATION_VERSION
+        )
+
         run_span = tracer.start_span(
-
+            instrumentation_names.get_agent_run_span_name(agent_name),
             attributes={
                 'model_name': model_used.model_name if model_used else 'no-model',
                 'agent_name': agent_name,
+                'gen_ai.agent.name': agent_name,
                 'logfire.msg': f'{agent_name} run',
             },
         )
@@ -684,6 +677,8 @@
     def _run_span_end_attributes(
         self, state: _agent_graph.GraphAgentState, usage: _usage.RunUsage, settings: InstrumentationSettings
     ):
+        literal_instructions, _ = self._get_instructions()
+
         if settings.version == 1:
             attrs = {
                 'all_messages_events': json.dumps(
@@ -696,7 +691,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
         else:
             attrs = {
                 'pydantic_ai.all_messages': json.dumps(settings.messages_to_otel_messages(state.message_history)),
-                **settings.system_instructions_attributes(
+                **settings.system_instructions_attributes(literal_instructions),
             }

         return {
@@ -721,8 +716,9 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
         model: models.Model | models.KnownModelName | str | _utils.Unset = _utils.UNSET,
         toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET,
         tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET,
+        instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET,
     ) -> Iterator[None]:
-        """Context manager to temporarily override agent dependencies, model, toolsets, or
+        """Context manager to temporarily override agent dependencies, model, toolsets, tools, or instructions.

         This is particularly useful when testing.
         You can find an example of this [here](../testing.md#overriding-model-via-pytest-fixtures).
@@ -732,6 +728,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
             model: The model to use instead of the model passed to the agent run.
             toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run.
             tools: The tools to use instead of the tools registered with the agent.
+            instructions: The instructions to use instead of the instructions registered with the agent.
         """
         if _utils.is_set(deps):
             deps_token = self._override_deps.set(_utils.Some(deps))
@@ -753,6 +750,12 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
         else:
             tools_token = None

+        if _utils.is_set(instructions):
+            normalized_instructions = self._normalize_instructions(instructions)
+            instructions_token = self._override_instructions.set(_utils.Some(normalized_instructions))
+        else:
+            instructions_token = None
+
         try:
             yield
         finally:
@@ -764,6 +767,8 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
             self._override_toolsets.reset(toolsets_token)
             if tools_token is not None:
                 self._override_tools.reset(tools_token)
+            if instructions_token is not None:
+                self._override_instructions.reset(instructions_token)

     @overload
     def instructions(
@@ -824,12 +829,12 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
             def decorator(
                 func_: _system_prompt.SystemPromptFunc[AgentDepsT],
             ) -> _system_prompt.SystemPromptFunc[AgentDepsT]:
-                self.
+                self._instructions.append(func_)
                 return func_

             return decorator
         else:
-            self.
+            self._instructions.append(func)
             return func

     @overload
@@ -1270,6 +1275,34 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
         else:
             return deps

+    def _normalize_instructions(
+        self,
+        instructions: Instructions[AgentDepsT],
+    ) -> list[str | _system_prompt.SystemPromptFunc[AgentDepsT]]:
+        if instructions is None:
+            return []
+        if isinstance(instructions, str) or callable(instructions):
+            return [instructions]
+        return list(instructions)
+
+    def _get_instructions(
+        self,
+    ) -> tuple[str | None, list[_system_prompt.SystemPromptRunner[AgentDepsT]]]:
+        override_instructions = self._override_instructions.get()
+        instructions = override_instructions.value if override_instructions else self._instructions
+
+        literal_parts: list[str] = []
+        functions: list[_system_prompt.SystemPromptRunner[AgentDepsT]] = []
+
+        for instruction in instructions:
+            if isinstance(instruction, str):
+                literal_parts.append(instruction)
+            else:
+                functions.append(_system_prompt.SystemPromptRunner[AgentDepsT](instruction))
+
+        literal = '\n'.join(literal_parts).strip() or None
+        return literal, functions
+
     def _get_toolset(
         self,
         output_toolset: AbstractToolset[AgentDepsT] | None | _utils.Unset = _utils.UNSET,
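The user-facing part of this refactor is that instructions are now stored as a single list and can be overridden like deps, model, toolsets, and tools. A runnable sketch with `TestModel` (the instruction strings and the `today` function are invented for illustration):

```python
from pydantic_ai import Agent, RunContext
from pydantic_ai.models.test import TestModel

def today(ctx: RunContext[None]) -> str:
    # Instructions may be literal strings, functions taking RunContext, or a sequence mixing both.
    return 'Today is 2025-01-01.'

agent = Agent(TestModel(), instructions=['Be concise.', today])

# New in 1.0.13: instructions can be swapped out temporarily, e.g. in tests.
with agent.override(instructions='Reply in French.'):
    result = agent.run_sync('hello')

print(result.output)
```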
pydantic_ai/agent/abstract.py

@@ -14,6 +14,7 @@ from pydantic_graph._utils import get_event_loop

 from .. import (
     _agent_graph,
+    _system_prompt,
     _utils,
     exceptions,
     messages as _messages,
@@ -60,6 +61,14 @@ EventStreamHandler: TypeAlias = Callable[
 """A function that receives agent [`RunContext`][pydantic_ai.tools.RunContext] and an async iterable of events from the model's streaming response and the agent's execution of tools."""


+Instructions = (
+    str
+    | _system_prompt.SystemPromptFunc[AgentDepsT]
+    | Sequence[str | _system_prompt.SystemPromptFunc[AgentDepsT]]
+    | None
+)
+
+
 class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
     """Abstract superclass for [`Agent`][pydantic_ai.agent.Agent], [`WrapperAgent`][pydantic_ai.agent.WrapperAgent], and your own custom agent implementations."""

@@ -681,8 +690,9 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
         model: models.Model | models.KnownModelName | str | _utils.Unset = _utils.UNSET,
         toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET,
         tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET,
+        instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET,
     ) -> Iterator[None]:
-        """Context manager to temporarily override agent dependencies, model, toolsets, or
+        """Context manager to temporarily override agent dependencies, model, toolsets, tools, or instructions.

         This is particularly useful when testing.
         You can find an example of this [here](../testing.md#overriding-model-via-pytest-fixtures).
@@ -692,6 +702,7 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
             model: The model to use instead of the model passed to the agent run.
             toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run.
             tools: The tools to use instead of the tools registered with the agent.
+            instructions: The instructions to use instead of the instructions registered with the agent.
         """
         raise NotImplementedError
         yield
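The new `Instructions` alias can also be used to type your own helpers. A small sketch; the diff only shows the alias defined in `pydantic_ai.agent.abstract`, so that module path is used here, and whether it is re-exported elsewhere is not confirmed by this diff:

```python
from __future__ import annotations

from pydantic_ai import Agent
from pydantic_ai.agent.abstract import Instructions
from pydantic_ai.models.test import TestModel

def make_agent(instructions: Instructions[None] = None) -> Agent[None, str]:
    # Accepts any instructions shape the Agent constructor does: str, callable, sequence, or None.
    return Agent(TestModel(), instructions=instructions)

print(make_agent('Be terse.').run_sync('hi').output)
```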
pydantic_ai/agent/wrapper.py

@@ -20,7 +20,7 @@ from ..tools import (
     ToolFuncEither,
 )
 from ..toolsets import AbstractToolset
-from .abstract import AbstractAgent, EventStreamHandler, RunOutputDataT
+from .abstract import AbstractAgent, EventStreamHandler, Instructions, RunOutputDataT


 class WrapperAgent(AbstractAgent[AgentDepsT, OutputDataT]):
@@ -214,8 +214,9 @@ class WrapperAgent(AbstractAgent[AgentDepsT, OutputDataT]):
         model: models.Model | models.KnownModelName | str | _utils.Unset = _utils.UNSET,
         toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET,
         tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET,
+        instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET,
     ) -> Iterator[None]:
-        """Context manager to temporarily override agent dependencies, model, toolsets, or
+        """Context manager to temporarily override agent dependencies, model, toolsets, tools, or instructions.

         This is particularly useful when testing.
         You can find an example of this [here](../testing.md#overriding-model-via-pytest-fixtures).
@@ -225,6 +226,13 @@ class WrapperAgent(AbstractAgent[AgentDepsT, OutputDataT]):
             model: The model to use instead of the model passed to the agent run.
             toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run.
             tools: The tools to use instead of the tools registered with the agent.
+            instructions: The instructions to use instead of the instructions registered with the agent.
         """
-        with self.wrapped.override(
+        with self.wrapped.override(
+            deps=deps,
+            model=model,
+            toolsets=toolsets,
+            tools=tools,
+            instructions=instructions,
+        ):
             yield
pydantic_ai/direct.py

@@ -81,7 +81,7 @@ async def model_request(
     return await model_instance.request(
         messages,
         model_settings,
-
+        model_request_parameters or models.ModelRequestParameters(),
     )


@@ -193,7 +193,7 @@ def model_request_stream(
     return model_instance.request_stream(
         messages,
         model_settings,
-
+        model_request_parameters or models.ModelRequestParameters(),
    )

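Per the two hunks above, the direct request helpers now substitute a default `ModelRequestParameters()` when none is supplied. A minimal sketch using the built-in `'test'` model string so it runs without network access (the prompt text is arbitrary, and the `user_text_prompt` helper is assumed to be available as in the direct-API docs):

```python
import asyncio
from pydantic_ai.direct import model_request
from pydantic_ai.messages import ModelRequest

async def main():
    # model_request_parameters is omitted; the helper falls back to ModelRequestParameters().
    response = await model_request('test', [ModelRequest.user_text_prompt('What is 2 + 2?')])
    print(response.parts)

asyncio.run(main())
```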