pydantic-ai-slim 0.2.5__tar.gz → 0.2.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/PKG-INFO +4 -4
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/_agent_graph.py +8 -1
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/_cli.py +8 -9
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/agent.py +11 -5
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/models/__init__.py +1 -1
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/models/google.py +0 -1
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/models/openai.py +1 -1
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/tools.py +32 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/.gitignore +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/LICENSE +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/README.md +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/__main__.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/_a2a.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/_output.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/_parts_manager.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/_pydantic.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/_utils.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/direct.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/format_as_xml.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/format_prompt.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/mcp.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/messages.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/models/_json_schema.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/models/anthropic.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/models/bedrock.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/models/cohere.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/models/fallback.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/models/function.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/models/gemini.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/models/groq.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/models/instrumented.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/models/mistral.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/models/test.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/providers/__init__.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/providers/anthropic.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/providers/azure.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/providers/cohere.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/providers/google.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/providers/groq.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/providers/mistral.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/providers/openai.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/providers/openrouter.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/result.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pydantic_ai/usage.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.6}/pyproject.toml +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: pydantic-ai-slim
|
|
3
|
-
Version: 0.2.5
|
|
3
|
+
Version: 0.2.6
|
|
4
4
|
Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
|
|
5
5
|
Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
|
|
6
6
|
License-Expression: MIT
|
|
@@ -30,11 +30,11 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
|
|
|
30
30
|
Requires-Dist: griffe>=1.3.2
|
|
31
31
|
Requires-Dist: httpx>=0.27
|
|
32
32
|
Requires-Dist: opentelemetry-api>=1.28.0
|
|
33
|
-
Requires-Dist: pydantic-graph==0.2.5
|
|
33
|
+
Requires-Dist: pydantic-graph==0.2.6
|
|
34
34
|
Requires-Dist: pydantic>=2.10
|
|
35
35
|
Requires-Dist: typing-inspection>=0.4.0
|
|
36
36
|
Provides-Extra: a2a
|
|
37
|
-
Requires-Dist: fasta2a==0.2.5; extra == 'a2a'
|
|
37
|
+
Requires-Dist: fasta2a==0.2.6; extra == 'a2a'
|
|
38
38
|
Provides-Extra: anthropic
|
|
39
39
|
Requires-Dist: anthropic>=0.49.0; extra == 'anthropic'
|
|
40
40
|
Provides-Extra: bedrock
|
|
@@ -48,7 +48,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
|
|
|
48
48
|
Provides-Extra: duckduckgo
|
|
49
49
|
Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
|
|
50
50
|
Provides-Extra: evals
|
|
51
|
-
Requires-Dist: pydantic-evals==0.2.5; extra == 'evals'
|
|
51
|
+
Requires-Dist: pydantic-evals==0.2.6; extra == 'evals'
|
|
52
52
|
Provides-Extra: google
|
|
53
53
|
Requires-Dist: google-genai>=1.15.0; extra == 'google'
|
|
54
54
|
Provides-Extra: groq
|
|
@@ -26,7 +26,7 @@ from . import (
|
|
|
26
26
|
)
|
|
27
27
|
from .result import OutputDataT, ToolOutput
|
|
28
28
|
from .settings import ModelSettings, merge_model_settings
|
|
29
|
-
from .tools import RunContext, Tool, ToolDefinition
|
|
29
|
+
from .tools import RunContext, Tool, ToolDefinition, ToolsPrepareFunc
|
|
30
30
|
|
|
31
31
|
if TYPE_CHECKING:
|
|
32
32
|
from .mcp import MCPServer
|
|
@@ -97,6 +97,8 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]):
|
|
|
97
97
|
|
|
98
98
|
tracer: Tracer
|
|
99
99
|
|
|
100
|
+
prepare_tools: ToolsPrepareFunc[DepsT] | None = None
|
|
101
|
+
|
|
100
102
|
|
|
101
103
|
class AgentNode(BaseNode[GraphAgentState, GraphAgentDeps[DepsT, Any], result.FinalResult[NodeRunEndT]]):
|
|
102
104
|
"""The base class for all agent nodes.
|
|
@@ -241,6 +243,11 @@ async def _prepare_request_parameters(
|
|
|
241
243
|
*map(add_mcp_server_tools, ctx.deps.mcp_servers),
|
|
242
244
|
)
|
|
243
245
|
|
|
246
|
+
if ctx.deps.prepare_tools:
|
|
247
|
+
# Prepare the tools using the provided function
|
|
248
|
+
# This also acts over tool definitions pulled from MCP servers
|
|
249
|
+
function_tool_defs = await ctx.deps.prepare_tools(run_context, function_tool_defs) or []
|
|
250
|
+
|
|
244
251
|
output_schema = ctx.deps.output_schema
|
|
245
252
|
return models.ModelRequestParameters(
|
|
246
253
|
function_tools=function_tool_defs,
|
|
@@ -53,7 +53,7 @@ PYDANTIC_AI_HOME = Path.home() / '.pydantic-ai'
|
|
|
53
53
|
This folder is used to store the prompt history and configuration.
|
|
54
54
|
"""
|
|
55
55
|
|
|
56
|
-
|
|
56
|
+
PROMPT_HISTORY_FILENAME = 'prompt-history.txt'
|
|
57
57
|
|
|
58
58
|
|
|
59
59
|
class SimpleCodeBlock(CodeBlock):
|
|
@@ -211,27 +211,26 @@ Special prompts:
|
|
|
211
211
|
pass
|
|
212
212
|
return 0
|
|
213
213
|
|
|
214
|
-
# Ensure the history directory and file exist
|
|
215
|
-
PROMPT_HISTORY_PATH.parent.mkdir(parents=True, exist_ok=True)
|
|
216
|
-
PROMPT_HISTORY_PATH.touch(exist_ok=True)
|
|
217
|
-
|
|
218
|
-
# doing this instead of `PromptSession[Any](history=` allows mocking of PromptSession in tests
|
|
219
|
-
session: PromptSession[Any] = PromptSession(history=FileHistory(str(PROMPT_HISTORY_PATH)))
|
|
220
214
|
try:
|
|
221
|
-
return asyncio.run(run_chat(session, stream, agent, console, code_theme, prog_name))
|
|
215
|
+
return asyncio.run(run_chat(stream, agent, console, code_theme, prog_name))
|
|
222
216
|
except KeyboardInterrupt: # pragma: no cover
|
|
223
217
|
return 0
|
|
224
218
|
|
|
225
219
|
|
|
226
220
|
async def run_chat(
|
|
227
|
-
session: PromptSession[Any],
|
|
228
221
|
stream: bool,
|
|
229
222
|
agent: Agent[AgentDepsT, OutputDataT],
|
|
230
223
|
console: Console,
|
|
231
224
|
code_theme: str,
|
|
232
225
|
prog_name: str,
|
|
226
|
+
config_dir: Path | None = None,
|
|
233
227
|
deps: AgentDepsT = None,
|
|
234
228
|
) -> int:
|
|
229
|
+
prompt_history_path = (config_dir or PYDANTIC_AI_HOME) / PROMPT_HISTORY_FILENAME
|
|
230
|
+
prompt_history_path.parent.mkdir(parents=True, exist_ok=True)
|
|
231
|
+
prompt_history_path.touch(exist_ok=True)
|
|
232
|
+
session: PromptSession[Any] = PromptSession(history=FileHistory(str(prompt_history_path)))
|
|
233
|
+
|
|
235
234
|
multiline = False
|
|
236
235
|
messages: list[ModelMessage] = []
|
|
237
236
|
|
|
@@ -42,6 +42,7 @@ from .tools import (
|
|
|
42
42
|
ToolFuncPlain,
|
|
43
43
|
ToolParams,
|
|
44
44
|
ToolPrepareFunc,
|
|
45
|
+
ToolsPrepareFunc,
|
|
45
46
|
)
|
|
46
47
|
|
|
47
48
|
# Re-exporting like this improves auto-import behavior in PyCharm
|
|
@@ -148,6 +149,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
|
|
|
148
149
|
_system_prompt_dynamic_functions: dict[str, _system_prompt.SystemPromptRunner[AgentDepsT]] = dataclasses.field(
|
|
149
150
|
repr=False
|
|
150
151
|
)
|
|
152
|
+
_prepare_tools: ToolsPrepareFunc[AgentDepsT] | None = dataclasses.field(repr=False)
|
|
151
153
|
_function_tools: dict[str, Tool[AgentDepsT]] = dataclasses.field(repr=False)
|
|
152
154
|
_mcp_servers: Sequence[MCPServer] = dataclasses.field(repr=False)
|
|
153
155
|
_default_retries: int = dataclasses.field(repr=False)
|
|
@@ -172,6 +174,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
|
|
|
172
174
|
retries: int = 1,
|
|
173
175
|
output_retries: int | None = None,
|
|
174
176
|
tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] = (),
|
|
177
|
+
prepare_tools: ToolsPrepareFunc[AgentDepsT] | None = None,
|
|
175
178
|
mcp_servers: Sequence[MCPServer] = (),
|
|
176
179
|
defer_model_check: bool = False,
|
|
177
180
|
end_strategy: EndStrategy = 'early',
|
|
@@ -200,6 +203,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
|
|
|
200
203
|
result_tool_description: str | None = None,
|
|
201
204
|
result_retries: int | None = None,
|
|
202
205
|
tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] = (),
|
|
206
|
+
prepare_tools: ToolsPrepareFunc[AgentDepsT] | None = None,
|
|
203
207
|
mcp_servers: Sequence[MCPServer] = (),
|
|
204
208
|
defer_model_check: bool = False,
|
|
205
209
|
end_strategy: EndStrategy = 'early',
|
|
@@ -223,6 +227,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
|
|
|
223
227
|
retries: int = 1,
|
|
224
228
|
output_retries: int | None = None,
|
|
225
229
|
tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] = (),
|
|
230
|
+
prepare_tools: ToolsPrepareFunc[AgentDepsT] | None = None,
|
|
226
231
|
mcp_servers: Sequence[MCPServer] = (),
|
|
227
232
|
defer_model_check: bool = False,
|
|
228
233
|
end_strategy: EndStrategy = 'early',
|
|
@@ -251,6 +256,9 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
|
|
|
251
256
|
output_retries: The maximum number of retries to allow for result validation, defaults to `retries`.
|
|
252
257
|
tools: Tools to register with the agent, you can also register tools via the decorators
|
|
253
258
|
[`@agent.tool`][pydantic_ai.Agent.tool] and [`@agent.tool_plain`][pydantic_ai.Agent.tool_plain].
|
|
259
|
+
prepare_tools: custom method to prepare the tool definition of all tools for each step.
|
|
260
|
+
This is useful if you want to customize the definition of multiple tools or you want to register
|
|
261
|
+
a subset of tools for a given step. See [`ToolsPrepareFunc`][pydantic_ai.tools.ToolsPrepareFunc]
|
|
254
262
|
mcp_servers: MCP servers to register with the agent. You should register a [`MCPServer`][pydantic_ai.mcp.MCPServer]
|
|
255
263
|
for each server you want the agent to connect to.
|
|
256
264
|
defer_model_check: by default, if you provide a [named][pydantic_ai.models.KnownModelName] model,
|
|
@@ -334,6 +342,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
|
|
|
334
342
|
self._default_retries = retries
|
|
335
343
|
self._max_result_retries = output_retries if output_retries is not None else retries
|
|
336
344
|
self._mcp_servers = mcp_servers
|
|
345
|
+
self._prepare_tools = prepare_tools
|
|
337
346
|
for tool in tools:
|
|
338
347
|
if isinstance(tool, Tool):
|
|
339
348
|
self._register_tool(tool)
|
|
@@ -694,6 +703,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
|
|
|
694
703
|
mcp_servers=self._mcp_servers,
|
|
695
704
|
default_retries=self._default_retries,
|
|
696
705
|
tracer=tracer,
|
|
706
|
+
prepare_tools=self._prepare_tools,
|
|
697
707
|
get_instructions=get_instructions,
|
|
698
708
|
)
|
|
699
709
|
start_node = _agent_graph.UserPromptNode[AgentDepsT](
|
|
@@ -1763,18 +1773,14 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
|
|
|
1763
1773
|
await agent.to_cli()
|
|
1764
1774
|
```
|
|
1765
1775
|
"""
|
|
1766
|
-
from prompt_toolkit import PromptSession
|
|
1767
|
-
from prompt_toolkit.history import FileHistory
|
|
1768
1776
|
from rich.console import Console
|
|
1769
1777
|
|
|
1770
|
-
from pydantic_ai._cli import PROMPT_HISTORY_PATH, run_chat
|
|
1778
|
+
from pydantic_ai._cli import run_chat
|
|
1771
1779
|
|
|
1772
1780
|
# TODO(Marcelo): We need to refactor the CLI code to be able to be able to just pass `agent`, `deps` and
|
|
1773
1781
|
# `prog_name` from here.
|
|
1774
1782
|
|
|
1775
|
-
session: PromptSession[Any] = PromptSession(history=FileHistory(str(PROMPT_HISTORY_PATH)))
|
|
1776
1783
|
await run_chat(
|
|
1777
|
-
session=session,
|
|
1778
1784
|
stream=True,
|
|
1779
1785
|
agent=self,
|
|
1780
1786
|
deps=deps,
|
|
@@ -491,7 +491,7 @@ def infer_model(model: Model | KnownModelName | str) -> Model:
|
|
|
491
491
|
from .cohere import CohereModel
|
|
492
492
|
|
|
493
493
|
return CohereModel(model_name, provider=provider)
|
|
494
|
-
elif provider in ('deepseek', 'openai', 'azure'):
|
|
494
|
+
elif provider in ('deepseek', 'openai', 'azure', 'openrouter'):
|
|
495
495
|
from .openai import OpenAIModel
|
|
496
496
|
|
|
497
497
|
return OpenAIModel(model_name, provider=provider)
|
|
@@ -279,7 +279,6 @@ class GoogleModel(Model):
|
|
|
279
279
|
def _process_response(self, response: GenerateContentResponse) -> ModelResponse:
|
|
280
280
|
if not response.candidates or len(response.candidates) != 1:
|
|
281
281
|
raise UnexpectedModelBehavior('Expected exactly one candidate in Gemini response') # pragma: no cover
|
|
282
|
-
print(response.candidates[0].safety_ratings)
|
|
283
282
|
if response.candidates[0].content is None or response.candidates[0].content.parts is None:
|
|
284
283
|
if response.candidates[0].finish_reason == 'SAFETY':
|
|
285
284
|
raise UnexpectedModelBehavior('Safety settings triggered', str(response))
|
|
@@ -170,7 +170,7 @@ class OpenAIModel(Model):
|
|
|
170
170
|
self,
|
|
171
171
|
model_name: OpenAIModelName,
|
|
172
172
|
*,
|
|
173
|
-
provider: Literal['openai', 'deepseek', 'azure'] | Provider[AsyncOpenAI] = 'openai',
|
|
173
|
+
provider: Literal['openai', 'deepseek', 'azure', 'openrouter'] | Provider[AsyncOpenAI] = 'openai',
|
|
174
174
|
system_prompt_role: OpenAISystemPromptRole | None = None,
|
|
175
175
|
):
|
|
176
176
|
"""Initialize an OpenAI model.
|
|
@@ -29,6 +29,7 @@ __all__ = (
|
|
|
29
29
|
'ToolFuncEither',
|
|
30
30
|
'ToolParams',
|
|
31
31
|
'ToolPrepareFunc',
|
|
32
|
+
'ToolsPrepareFunc',
|
|
32
33
|
'Tool',
|
|
33
34
|
'ObjectJsonSchema',
|
|
34
35
|
'ToolDefinition',
|
|
@@ -133,6 +134,37 @@ hitchhiker = Tool(hitchhiker, prepare=only_if_42)
|
|
|
133
134
|
Usage `ToolPrepareFunc[AgentDepsT]`.
|
|
134
135
|
"""
|
|
135
136
|
|
|
137
|
+
ToolsPrepareFunc: TypeAlias = (
|
|
138
|
+
'Callable[[RunContext[AgentDepsT], list[ToolDefinition]], Awaitable[list[ToolDefinition] | None]]'
|
|
139
|
+
)
|
|
140
|
+
"""Definition of a function that can prepare the tool definition of all tools for each step.
|
|
141
|
+
This is useful if you want to customize the definition of multiple tools or you want to register
|
|
142
|
+
a subset of tools for a given step.
|
|
143
|
+
|
|
144
|
+
Example — here `turn_on_strict_if_openai` is valid as a `ToolsPrepareFunc`:
|
|
145
|
+
|
|
146
|
+
```python {noqa="I001"}
|
|
147
|
+
from dataclasses import replace
|
|
148
|
+
from typing import Union
|
|
149
|
+
|
|
150
|
+
from pydantic_ai import Agent, RunContext
|
|
151
|
+
from pydantic_ai.tools import ToolDefinition
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
async def turn_on_strict_if_openai(
|
|
155
|
+
ctx: RunContext[None], tool_defs: list[ToolDefinition]
|
|
156
|
+
) -> Union[list[ToolDefinition], None]:
|
|
157
|
+
if ctx.model.system == 'openai':
|
|
158
|
+
return [replace(tool_def, strict=True) for tool_def in tool_defs]
|
|
159
|
+
return tool_defs
|
|
160
|
+
|
|
161
|
+
agent = Agent('openai:gpt-4o', prepare_tools=turn_on_strict_if_openai)
|
|
162
|
+
```
|
|
163
|
+
|
|
164
|
+
Usage `ToolsPrepareFunc[AgentDepsT]`.
|
|
165
|
+
"""
|
|
166
|
+
|
|
167
|
+
|
|
136
168
|
DocstringFormat = Literal['google', 'numpy', 'sphinx', 'auto']
|
|
137
169
|
"""Supported docstring formats.
|
|
138
170
|
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|