pydantic-ai-slim 0.2.5__tar.gz → 0.2.7__tar.gz
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/PKG-INFO +4 -4
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/_agent_graph.py +25 -5
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/_cli.py +8 -9
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/_output.py +6 -2
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/_parts_manager.py +5 -5
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/_pydantic.py +8 -1
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/_utils.py +9 -1
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/agent.py +35 -54
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/direct.py +7 -31
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/mcp.py +59 -7
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/messages.py +72 -29
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/models/__init__.py +1 -1
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/models/anthropic.py +80 -87
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/models/bedrock.py +2 -2
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/models/google.py +1 -2
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/models/mistral.py +1 -1
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/models/openai.py +1 -2
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/result.py +5 -3
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/tools.py +39 -3
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/usage.py +7 -2
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/.gitignore +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/LICENSE +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/README.md +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/__main__.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/_a2a.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/format_as_xml.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/format_prompt.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/models/_json_schema.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/models/cohere.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/models/fallback.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/models/function.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/models/gemini.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/models/groq.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/models/instrumented.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/models/test.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/providers/__init__.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/providers/anthropic.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/providers/azure.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/providers/cohere.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/providers/google.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/providers/groq.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/providers/mistral.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/providers/openai.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/providers/openrouter.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-0.2.5 → pydantic_ai_slim-0.2.7}/pyproject.toml +0 -0
--- pydantic_ai_slim-0.2.5/PKG-INFO
+++ pydantic_ai_slim-0.2.7/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.2.5
+Version: 0.2.7
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
 License-Expression: MIT
@@ -30,11 +30,11 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.2.5
+Requires-Dist: pydantic-graph==0.2.7
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
-Requires-Dist: fasta2a==0.2.5; extra == 'a2a'
+Requires-Dist: fasta2a==0.2.7; extra == 'a2a'
 Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.49.0; extra == 'anthropic'
 Provides-Extra: bedrock
@@ -48,7 +48,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.2.5; extra == 'evals'
+Requires-Dist: pydantic-evals==0.2.7; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.15.0; extra == 'google'
 Provides-Extra: groq
--- pydantic_ai_slim-0.2.5/pydantic_ai/_agent_graph.py
+++ pydantic_ai_slim-0.2.7/pydantic_ai/_agent_graph.py
@@ -26,7 +26,7 @@ from . import (
 )
 from .result import OutputDataT, ToolOutput
 from .settings import ModelSettings, merge_model_settings
-from .tools import RunContext, Tool, ToolDefinition
+from .tools import RunContext, Tool, ToolDefinition, ToolsPrepareFunc
 
 if TYPE_CHECKING:
     from .mcp import MCPServer
@@ -97,6 +97,8 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]):
 
     tracer: Tracer
 
+    prepare_tools: ToolsPrepareFunc[DepsT] | None = None
+
 
 class AgentNode(BaseNode[GraphAgentState, GraphAgentDeps[DepsT, Any], result.FinalResult[NodeRunEndT]]):
    """The base class for all agent nodes.
@@ -220,26 +222,44 @@ async def _prepare_request_parameters(
     ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
 ) -> models.ModelRequestParameters:
     """Build tools and create an agent model."""
-    function_tool_defs: list[ToolDefinition] = []
+    function_tool_defs_map: dict[str, ToolDefinition] = {}
 
     run_context = build_run_context(ctx)
 
     async def add_tool(tool: Tool[DepsT]) -> None:
         ctx = run_context.replace_with(retry=tool.current_retry, tool_name=tool.name)
         if tool_def := await tool.prepare_tool_def(ctx):
-            function_tool_defs.append(tool_def)
+            # prepare_tool_def may change tool_def.name
+            if tool_def.name in function_tool_defs_map:
+                if tool_def.name != tool.name:
+                    # Prepare tool def may have renamed the tool
+                    raise exceptions.UserError(
+                        f"Renaming tool '{tool.name}' to '{tool_def.name}' conflicts with existing tool."
+                    )
+                else:
+                    raise exceptions.UserError(f'Tool name conflicts with existing tool: {tool.name!r}.')
+            function_tool_defs_map[tool_def.name] = tool_def
 
     async def add_mcp_server_tools(server: MCPServer) -> None:
         if not server.is_running:
             raise exceptions.UserError(f'MCP server is not running: {server}')
         tool_defs = await server.list_tools()
-        # TODO(Marcelo): We should check if the tool names are unique.
-        function_tool_defs.extend(tool_defs)
+        for tool_def in tool_defs:
+            if tool_def.name in function_tool_defs_map:
+                raise exceptions.UserError(
+                    f"MCP Server '{server}' defines a tool whose name conflicts with existing tool: {tool_def.name!r}. Consider using `tool_prefix` to avoid name conflicts."
+                )
+            function_tool_defs_map[tool_def.name] = tool_def
 
     await asyncio.gather(
         *map(add_tool, ctx.deps.function_tools.values()),
         *map(add_mcp_server_tools, ctx.deps.mcp_servers),
     )
+    function_tool_defs = list(function_tool_defs_map.values())
+    if ctx.deps.prepare_tools:
+        # Prepare the tools using the provided function
+        # This also acts over tool definitions pulled from MCP servers
+        function_tool_defs = await ctx.deps.prepare_tools(run_context, function_tool_defs) or []
 
     output_schema = ctx.deps.output_schema
     return models.ModelRequestParameters(
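The new `prepare_tools` hook runs once per model request: it receives the run context and the full list of tool definitions (including tools pulled from MCP servers), and whatever it returns becomes the toolset for that step. A minimal sketch of a per-step filter; the model name and the `summarize` tool name are illustrative:

```python
from pydantic_ai import Agent, RunContext
from pydantic_ai.tools import ToolDefinition


async def summarize_only(
    ctx: RunContext[None], tool_defs: list[ToolDefinition]
) -> list[ToolDefinition] | None:
    # Expose only a subset of the registered tools for this step;
    # returning None (or an empty list) disables tools entirely.
    return [td for td in tool_defs if td.name == 'summarize']


agent = Agent('openai:gpt-4o', prepare_tools=summarize_only)
```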
--- pydantic_ai_slim-0.2.5/pydantic_ai/_cli.py
+++ pydantic_ai_slim-0.2.7/pydantic_ai/_cli.py
@@ -53,7 +53,7 @@ PYDANTIC_AI_HOME = Path.home() / '.pydantic-ai'
 This folder is used to store the prompt history and configuration.
 """
 
-PROMPT_HISTORY_PATH = PYDANTIC_AI_HOME / 'prompt-history.txt'
+PROMPT_HISTORY_FILENAME = 'prompt-history.txt'
 
 
 class SimpleCodeBlock(CodeBlock):
@@ -211,27 +211,26 @@ Special prompts:
             pass
         return 0
 
-    # Ensure the history directory and file exist
-    PROMPT_HISTORY_PATH.parent.mkdir(parents=True, exist_ok=True)
-    PROMPT_HISTORY_PATH.touch(exist_ok=True)
-
-    # doing this instead of `PromptSession[Any](history=` allows mocking of PromptSession in tests
-    session: PromptSession[Any] = PromptSession(history=FileHistory(str(PROMPT_HISTORY_PATH)))
     try:
-        return asyncio.run(run_chat(session, stream, agent, console, code_theme, prog_name))
+        return asyncio.run(run_chat(stream, agent, console, code_theme, prog_name))
    except KeyboardInterrupt:  # pragma: no cover
         return 0
 
 
 async def run_chat(
-    session: PromptSession[Any],
     stream: bool,
     agent: Agent[AgentDepsT, OutputDataT],
     console: Console,
     code_theme: str,
     prog_name: str,
+    config_dir: Path | None = None,
     deps: AgentDepsT = None,
 ) -> int:
+    prompt_history_path = (config_dir or PYDANTIC_AI_HOME) / PROMPT_HISTORY_FILENAME
+    prompt_history_path.parent.mkdir(parents=True, exist_ok=True)
+    prompt_history_path.touch(exist_ok=True)
+    session: PromptSession[Any] = PromptSession(history=FileHistory(str(prompt_history_path)))
+
     multiline = False
     messages: list[ModelMessage] = []
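`run_chat` now owns prompt-history setup itself: the history file is derived from the new `config_dir` argument (falling back to `PYDANTIC_AI_HOME`) rather than a module-level `PROMPT_HISTORY_PATH`. A sketch of calling it with a custom location; note `_cli` is a private module, so this is illustrative rather than a supported API:

```python
import asyncio
from pathlib import Path

from rich.console import Console

from pydantic_ai import Agent
from pydantic_ai._cli import run_chat

agent = Agent('openai:gpt-4o')

# History is written to <config_dir>/prompt-history.txt; omitting
# config_dir keeps the old default of ~/.pydantic-ai.
asyncio.run(
    run_chat(
        stream=True,
        agent=agent,
        console=Console(),
        code_theme='monokai',
        prog_name='my-cli',
        config_dir=Path('.my-cli'),
    )
)
```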
--- pydantic_ai_slim-0.2.5/pydantic_ai/_output.py
+++ pydantic_ai_slim-0.2.7/pydantic_ai/_output.py
@@ -231,9 +231,13 @@ class OutputSchemaTool(Generic[OutputDataT]):
         try:
             pyd_allow_partial: Literal['off', 'trailing-strings'] = 'trailing-strings' if allow_partial else 'off'
             if isinstance(tool_call.args, str):
-                output = self.type_adapter.validate_json(tool_call.args, experimental_allow_partial=pyd_allow_partial)
+                output = self.type_adapter.validate_json(
+                    tool_call.args or '{}', experimental_allow_partial=pyd_allow_partial
+                )
             else:
-                output = self.type_adapter.validate_python(tool_call.args, experimental_allow_partial=pyd_allow_partial)
+                output = self.type_adapter.validate_python(
+                    tool_call.args or {}, experimental_allow_partial=pyd_allow_partial
+                )
         except ValidationError as e:
             if wrap_validation_errors:
                 m = _messages.RetryPromptPart(
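The `or '{}'` / `or {}` fallbacks guard against tool calls that arrive with no arguments at all, while `experimental_allow_partial` keeps validation tolerant of half-streamed JSON. A small standalone illustration of the pydantic side of this, assuming a hypothetical `dict[str, int]` schema:

```python
from pydantic import TypeAdapter

ta = TypeAdapter(dict[str, int])

# 'trailing-strings' mode accepts JSON that is still being streamed,
# dropping the incomplete trailing portion instead of raising;
# this should yield {'a': 1}.
print(ta.validate_json('{"a": 1, "b"', experimental_allow_partial='trailing-strings'))
```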
--- pydantic_ai_slim-0.2.5/pydantic_ai/_parts_manager.py
+++ pydantic_ai_slim-0.2.7/pydantic_ai/_parts_manager.py
@@ -132,7 +132,7 @@ class ModelResponsePartsManager:
     ) -> ModelResponseStreamEvent | None:
         """Handle or update a tool call, creating or updating a `ToolCallPart` or `ToolCallPartDelta`.
 
-        Managed items remain as `ToolCallPartDelta`s until they have both a tool_name and args, at which
+        Managed items remain as `ToolCallPartDelta`s until they have at least a tool_name, at which
         point they are upgraded to `ToolCallPart`s.
 
         If `vendor_part_id` is None, updates the latest matching ToolCallPart (or ToolCallPartDelta)
@@ -143,11 +143,11 @@ class ModelResponsePartsManager:
                 If None, the latest matching tool call may be updated.
             tool_name: The name of the tool. If None, the manager does not enforce
                 a name match when `vendor_part_id` is None.
-            args: Arguments for the tool call, either as a string or a dictionary of key-value pairs.
+            args: Arguments for the tool call, either as a string, a dictionary of key-value pairs, or None.
             tool_call_id: An optional string representing an identifier for this tool call.
 
         Returns:
-            - A `PartStartEvent` if a new part is created.
+            - A `PartStartEvent` if a new ToolCallPart is created.
             - A `PartDeltaEvent` if an existing part is updated.
             - `None` if no new event is emitted (e.g., the part is still incomplete).
 
@@ -207,7 +207,7 @@ class ModelResponsePartsManager:
         *,
         vendor_part_id: Hashable | None,
         tool_name: str,
-        args: str | dict[str, Any],
+        args: str | dict[str, Any] | None,
         tool_call_id: str | None = None,
     ) -> ModelResponseStreamEvent:
         """Immediately create or fully-overwrite a ToolCallPart with the given information.
@@ -218,7 +218,7 @@ class ModelResponsePartsManager:
             vendor_part_id: The vendor's ID for this tool call part. If not
                 None and an existing part is found, that part is overwritten.
             tool_name: The name of the tool being invoked.
-            args: The arguments for the tool call, either as a string or a dictionary.
+            args: The arguments for the tool call, either as a string, a dictionary, or None.
             tool_call_id: An optional string identifier for this tool call.
 
         Returns:
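Loosening `args` to `str | dict[str, Any] | None` matters for providers that open a tool call with just a name and stream the arguments afterwards. A sketch against the private parts manager, using the signatures shown in the hunks above (method names assumed from the docstrings):

```python
from pydantic_ai._parts_manager import ModelResponsePartsManager

manager = ModelResponsePartsManager()

# A tool call may now start with no arguments at all...
event = manager.handle_tool_call_part(
    vendor_part_id='call_1', tool_name='get_weather', args=None
)
print(event)  # PartStartEvent wrapping a ToolCallPart with empty args

# ...with the args arriving later as string deltas.
manager.handle_tool_call_delta(
    vendor_part_id='call_1', tool_name=None, args='{"city": "Par'
)
```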
--- pydantic_ai_slim-0.2.5/pydantic_ai/_pydantic.py
+++ pydantic_ai_slim-0.2.7/pydantic_ai/_pydantic.py
@@ -76,8 +76,15 @@ def function_schema(  # noqa: C901
     description, field_descriptions = doc_descriptions(function, sig, docstring_format=docstring_format)
 
     if require_parameter_descriptions:
-        if len(field_descriptions) != len(sig.parameters):
+        if takes_ctx:
+            parameters_without_ctx = set(
+                name for name in sig.parameters if not _is_call_ctx(sig.parameters[name].annotation)
+            )
+            missing_params = parameters_without_ctx - set(field_descriptions)
+        else:
             missing_params = set(sig.parameters) - set(field_descriptions)
+
+        if missing_params:
             errors.append(f'Missing parameter descriptions for {", ".join(missing_params)}')
 
     for index, (name, p) in enumerate(sig.parameters.items()):
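The practical effect: with `require_parameter_descriptions=True`, a tool that takes `RunContext` no longer fails just because the docstring (correctly) doesn't describe `ctx`. A sketch, using the built-in `'test'` model so it runs offline:

```python
from pydantic_ai import Agent, RunContext

agent = Agent('test')


# Previously this raised, because `ctx` counted as a parameter missing a
# description; RunContext parameters are now excluded from the check.
@agent.tool(require_parameter_descriptions=True)
def greet(ctx: RunContext[None], name: str) -> str:
    """Greet someone.

    Args:
        name: Who to greet.
    """
    return f'Hello, {name}!'
```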
--- pydantic_ai_slim-0.2.5/pydantic_ai/_utils.py
+++ pydantic_ai_slim-0.2.7/pydantic_ai/_utils.py
@@ -5,7 +5,7 @@ import time
 import uuid
 from collections.abc import AsyncIterable, AsyncIterator, Iterator
 from contextlib import asynccontextmanager, suppress
-from dataclasses import dataclass, is_dataclass
+from dataclasses import dataclass, fields, is_dataclass
 from datetime import datetime, timezone
 from functools import partial
 from types import GenericAlias
@@ -290,3 +290,11 @@ class PeekableAsyncStream(Generic[T]):
 
 def get_traceparent(x: AgentRun | AgentRunResult | GraphRun | GraphRunResult) -> str:
     return x._traceparent(required=False) or ''  # type: ignore[reportPrivateUsage]
+
+
+def dataclasses_no_defaults_repr(self: Any) -> str:
+    """Exclude fields with values equal to the field default."""
+    kv_pairs = (
+        f'{f.name}={getattr(self, f.name)!r}' for f in fields(self) if f.repr and getattr(self, f.name) != f.default
+    )
+    return f'{self.__class__.__qualname__}({", ".join(kv_pairs)})'
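`dataclasses_no_defaults_repr` is what trims all the message reprs in this release (the docstring diffs below show the effect: `part_kind='user-prompt'`, `kind='response'`, `vendor_id=None` and friends disappear). A minimal sketch of wiring it into a dataclass; the `Part` class is made up, and `_utils` is a private import path:

```python
from dataclasses import dataclass

from pydantic_ai._utils import dataclasses_no_defaults_repr


@dataclass(repr=False)
class Part:
    content: str
    part_kind: str = 'text'

    # Fields whose value equals the field default are omitted from the repr.
    __repr__ = dataclasses_no_defaults_repr


print(Part(content='Paris'))  # Part(content='Paris')
print(Part(content='Paris', part_kind='image'))  # Part(content='Paris', part_kind='image')
```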
--- pydantic_ai_slim-0.2.5/pydantic_ai/agent.py
+++ pydantic_ai_slim-0.2.7/pydantic_ai/agent.py
@@ -42,6 +42,7 @@ from .tools import (
     ToolFuncPlain,
     ToolParams,
     ToolPrepareFunc,
+    ToolsPrepareFunc,
 )
 
 # Re-exporting like this improves auto-import behavior in PyCharm
@@ -148,6 +149,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
     _system_prompt_dynamic_functions: dict[str, _system_prompt.SystemPromptRunner[AgentDepsT]] = dataclasses.field(
         repr=False
     )
+    _prepare_tools: ToolsPrepareFunc[AgentDepsT] | None = dataclasses.field(repr=False)
     _function_tools: dict[str, Tool[AgentDepsT]] = dataclasses.field(repr=False)
     _mcp_servers: Sequence[MCPServer] = dataclasses.field(repr=False)
     _default_retries: int = dataclasses.field(repr=False)
@@ -172,6 +174,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         retries: int = 1,
         output_retries: int | None = None,
         tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] = (),
+        prepare_tools: ToolsPrepareFunc[AgentDepsT] | None = None,
         mcp_servers: Sequence[MCPServer] = (),
         defer_model_check: bool = False,
         end_strategy: EndStrategy = 'early',
@@ -200,6 +203,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         result_tool_description: str | None = None,
         result_retries: int | None = None,
         tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] = (),
+        prepare_tools: ToolsPrepareFunc[AgentDepsT] | None = None,
         mcp_servers: Sequence[MCPServer] = (),
         defer_model_check: bool = False,
         end_strategy: EndStrategy = 'early',
@@ -223,6 +227,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         retries: int = 1,
         output_retries: int | None = None,
         tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] = (),
+        prepare_tools: ToolsPrepareFunc[AgentDepsT] | None = None,
         mcp_servers: Sequence[MCPServer] = (),
         defer_model_check: bool = False,
         end_strategy: EndStrategy = 'early',
@@ -251,6 +256,9 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             output_retries: The maximum number of retries to allow for result validation, defaults to `retries`.
             tools: Tools to register with the agent, you can also register tools via the decorators
                 [`@agent.tool`][pydantic_ai.Agent.tool] and [`@agent.tool_plain`][pydantic_ai.Agent.tool_plain].
+            prepare_tools: custom method to prepare the tool definition of all tools for each step.
+                This is useful if you want to customize the definition of multiple tools or you want to register
+                a subset of tools for a given step. See [`ToolsPrepareFunc`][pydantic_ai.tools.ToolsPrepareFunc]
             mcp_servers: MCP servers to register with the agent. You should register a [`MCPServer`][pydantic_ai.mcp.MCPServer]
                 for each server you want the agent to connect to.
             defer_model_check: by default, if you provide a [named][pydantic_ai.models.KnownModelName] model,
@@ -334,6 +342,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         self._default_retries = retries
         self._max_result_retries = output_retries if output_retries is not None else retries
         self._mcp_servers = mcp_servers
+        self._prepare_tools = prepare_tools
         for tool in tools:
             if isinstance(tool, Tool):
                 self._register_tool(tool)
@@ -565,30 +574,21 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
                         UserPromptPart(
                             content='What is the capital of France?',
                             timestamp=datetime.datetime(...),
-                            part_kind='user-prompt',
                         )
-                    ],
-                    instructions=None,
-                    kind='request',
+                    ]
                 )
             ),
             CallToolsNode(
                 model_response=ModelResponse(
-                    parts=[TextPart(content='Paris', part_kind='text')],
+                    parts=[TextPart(content='Paris')],
                     usage=Usage(
-                        requests=1,
-                        request_tokens=56,
-                        response_tokens=1,
-                        total_tokens=57,
-                        details=None,
+                        requests=1, request_tokens=56, response_tokens=1, total_tokens=57
                     ),
                     model_name='gpt-4o',
                     timestamp=datetime.datetime(...),
-                    kind='response',
-                    vendor_id=None,
                 )
             ),
-            End(data=FinalResult(output='Paris', tool_name=None, tool_call_id=None)),
+            End(data=FinalResult(output='Paris')),
         ]
         '''
         print(agent_run.result.output)
@@ -694,6 +694,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             mcp_servers=self._mcp_servers,
             default_retries=self._default_retries,
             tracer=tracer,
+            prepare_tools=self._prepare_tools,
             get_instructions=get_instructions,
         )
         start_node = _agent_graph.UserPromptNode[AgentDepsT](
@@ -1750,9 +1751,13 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             lifespan=lifespan,
         )
 
-    async def to_cli(self: Self, deps: AgentDepsT = None) -> None:
+    async def to_cli(self: Self, deps: AgentDepsT = None, prog_name: str = 'pydantic-ai') -> None:
         """Run the agent in a CLI chat interface.
 
+        Args:
+            deps: The dependencies to pass to the agent.
+            prog_name: The name of the program to use for the CLI. Defaults to 'pydantic-ai'.
+
         Example:
         ```python {title="agent_to_cli.py" test="skip"}
         from pydantic_ai import Agent
@@ -1763,37 +1768,28 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             await agent.to_cli()
         ```
         """
-        from prompt_toolkit import PromptSession
-        from prompt_toolkit.history import FileHistory
         from rich.console import Console
 
-        from pydantic_ai._cli import PROMPT_HISTORY_PATH, run_chat
+        from pydantic_ai._cli import run_chat
 
-        # TODO(Marcelo): We need to refactor the CLI code to be able to pass the
-        # `prog_name` from here.
+        await run_chat(stream=True, agent=self, deps=deps, console=Console(), code_theme='monokai', prog_name=prog_name)
 
-        session: PromptSession[Any] = PromptSession(history=FileHistory(str(PROMPT_HISTORY_PATH)))
-        await run_chat(
-            session=session,
-            stream=True,
-            agent=self,
-            deps=deps,
-            console=Console(),
-            code_theme='monokai',
-            prog_name='pydantic-ai',
-        )
-
-    def to_cli_sync(self: Self, deps: AgentDepsT = None) -> None:
+    def to_cli_sync(self: Self, deps: AgentDepsT = None, prog_name: str = 'pydantic-ai') -> None:
         """Run the agent in a CLI chat interface with the non-async interface.
 
+        Args:
+            deps: The dependencies to pass to the agent.
+            prog_name: The name of the program to use for the CLI. Defaults to 'pydantic-ai'.
+
         ```python {title="agent_to_cli_sync.py" test="skip"}
         from pydantic_ai import Agent
 
         agent = Agent('openai:gpt-4o', instructions='You always respond in Italian.')
         agent.to_cli_sync()
+        agent.to_cli_sync(prog_name='assistant')
         ```
         """
-        return get_event_loop().run_until_complete(self.to_cli(deps=deps))
+        return get_event_loop().run_until_complete(self.to_cli(deps=deps, prog_name=prog_name))
 
 
 @dataclasses.dataclass(repr=False)
@@ -1835,30 +1831,21 @@ class AgentRun(Generic[AgentDepsT, OutputDataT]):
                         UserPromptPart(
                             content='What is the capital of France?',
                             timestamp=datetime.datetime(...),
-                            part_kind='user-prompt',
                         )
-                    ],
-                    instructions=None,
-                    kind='request',
+                    ]
                 )
             ),
             CallToolsNode(
                 model_response=ModelResponse(
-                    parts=[TextPart(content='Paris', part_kind='text')],
+                    parts=[TextPart(content='Paris')],
                     usage=Usage(
-                        requests=1,
-                        request_tokens=56,
-                        response_tokens=1,
-                        total_tokens=57,
-                        details=None,
+                        requests=1, request_tokens=56, response_tokens=1, total_tokens=57
                    ),
                     model_name='gpt-4o',
                     timestamp=datetime.datetime(...),
-                    kind='response',
-                    vendor_id=None,
                 )
             ),
-            End(data=FinalResult(output='Paris', tool_name=None, tool_call_id=None)),
+            End(data=FinalResult(output='Paris')),
         ]
         '''
         print(agent_run.result.output)
@@ -1981,30 +1968,24 @@ class AgentRun(Generic[AgentDepsT, OutputDataT]):
                         UserPromptPart(
                             content='What is the capital of France?',
                             timestamp=datetime.datetime(...),
-                            part_kind='user-prompt',
                         )
-                    ],
-                    instructions=None,
-                    kind='request',
+                    ]
                 )
             ),
             CallToolsNode(
                 model_response=ModelResponse(
-                    parts=[TextPart(content='Paris', part_kind='text')],
+                    parts=[TextPart(content='Paris')],
                     usage=Usage(
                         requests=1,
                         request_tokens=56,
                         response_tokens=1,
                         total_tokens=57,
-                        details=None,
                     ),
                     model_name='gpt-4o',
                     timestamp=datetime.datetime(...),
-                    kind='response',
-                    vendor_id=None,
                 )
             ),
-            End(data=FinalResult(output='Paris', tool_name=None, tool_call_id=None)),
+            End(data=FinalResult(output='Paris')),
         ]
         '''
         print('Final result:', agent_run.result.output)
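Besides wiring `prepare_tools` through to the graph, the user-visible addition in `agent.py` is the `prog_name` parameter on `to_cli`/`to_cli_sync`, which previously hard-coded `'pydantic-ai'`. The sync form is shown in the docstring above; the async variant looks like this (instructions text is illustrative):

```python
import asyncio

from pydantic_ai import Agent

agent = Agent('openai:gpt-4o', instructions='Be concise.')

# prog_name now controls the name the CLI announces itself with.
asyncio.run(agent.to_cli(prog_name='assistant'))
```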
--- pydantic_ai_slim-0.2.5/pydantic_ai/direct.py
+++ pydantic_ai_slim-0.2.7/pydantic_ai/direct.py
@@ -41,18 +41,10 @@ async def model_request
     print(model_response)
     '''
     ModelResponse(
-        parts=[TextPart(content='Paris', part_kind='text')],
-        usage=Usage(
-            requests=1,
-            request_tokens=56,
-            response_tokens=1,
-            total_tokens=57,
-            details=None,
-        ),
+        parts=[TextPart(content='Paris')],
+        usage=Usage(requests=1, request_tokens=56, response_tokens=1, total_tokens=57),
         model_name='claude-3-5-haiku-latest',
         timestamp=datetime.datetime(...),
-        kind='response',
-        vendor_id=None,
     )
     '''
     ```
@@ -102,14 +94,10 @@ def model_request_sync(
     print(model_response)
     '''
     ModelResponse(
-        parts=[TextPart(content='Paris', part_kind='text')],
-        usage=Usage(
-            requests=1, request_tokens=56, response_tokens=1, total_tokens=57, details=None
-        ),
+        parts=[TextPart(content='Paris')],
+        usage=Usage(requests=1, request_tokens=56, response_tokens=1, total_tokens=57),
         model_name='claude-3-5-haiku-latest',
         timestamp=datetime.datetime(...),
-        kind='response',
-        vendor_id=None,
     )
     '''
     ```
@@ -163,23 +151,11 @@ def model_request_stream(
     print(chunks)
     '''
     [
-        PartStartEvent(
-            index=0,
-            part=TextPart(content='Albert Einstein was ', part_kind='text'),
-            event_kind='part_start',
-        ),
-        PartDeltaEvent(
-            index=0,
-            delta=TextPartDelta(
-                content_delta='a German-born theoretical ', part_delta_kind='text'
-            ),
-            event_kind='part_delta',
-        ),
+        PartStartEvent(index=0, part=TextPart(content='Albert Einstein was ')),
         PartDeltaEvent(
-            index=0,
-            delta=TextPartDelta(content_delta='physicist.', part_delta_kind='text'),
-            event_kind='part_delta',
+            index=0, delta=TextPartDelta(content_delta='a German-born theoretical ')
         ),
+        PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='physicist.')),
     ]
     '''
     ```
--- pydantic_ai_slim-0.2.5/pydantic_ai/mcp.py
+++ pydantic_ai_slim-0.2.7/pydantic_ai/mcp.py
@@ -46,6 +46,13 @@ class MCPServer(ABC):
     """
 
     is_running: bool = False
+    tool_prefix: str | None = None
+    """A prefix to add to all tools that are registered with the server.
+
+    If not empty, will include a trailing underscore(`_`).
+
+    e.g. if `tool_prefix='foo'`, then a tool named `bar` will be registered as `foo_bar`
+    """
 
     _client: ClientSession
     _read_stream: MemoryObjectReceiveStream[JSONRPCMessage | Exception]
@@ -57,7 +64,10 @@
     async def client_streams(
         self,
     ) -> AsyncIterator[
-        tuple[MemoryObjectReceiveStream[JSONRPCMessage | Exception], MemoryObjectSendStream[JSONRPCMessage]]
+        tuple[
+            MemoryObjectReceiveStream[JSONRPCMessage | Exception],
+            MemoryObjectSendStream[JSONRPCMessage],
+        ]
     ]:
         """Create the streams for the MCP server."""
         raise NotImplementedError('MCP Server subclasses must implement this method.')
@@ -68,6 +78,14 @@
         """Get the log level for the MCP server."""
         raise NotImplementedError('MCP Server subclasses must implement this method.')
 
+    def get_prefixed_tool_name(self, tool_name: str) -> str:
+        """Get the tool name with prefix if `tool_prefix` is set."""
+        return f'{self.tool_prefix}_{tool_name}' if self.tool_prefix else tool_name
+
+    def get_unprefixed_tool_name(self, tool_name: str) -> str:
+        """Get original tool name without prefix for calling tools."""
+        return tool_name.removeprefix(f'{self.tool_prefix}_') if self.tool_prefix else tool_name
+
     async def list_tools(self) -> list[ToolDefinition]:
         """Retrieve tools that are currently active on the server.
 
@@ -78,7 +96,7 @@
         tools = await self._client.list_tools()
         return [
             ToolDefinition(
-                name=tool.name,
+                name=self.get_prefixed_tool_name(tool.name),
                 description=tool.description or '',
                 parameters_json_schema=tool.inputSchema,
             )
@@ -100,7 +118,7 @@
         Raises:
             ModelRetry: If the tool call fails.
         """
-        result = await self._client.call_tool(tool_name, arguments)
+        result = await self._client.call_tool(self.get_unprefixed_tool_name(tool_name), arguments)
 
         content = [self._map_tool_result_part(part) for part in result.content]
 
@@ -126,7 +144,10 @@
         return self
 
     async def __aexit__(
-        self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None
+        self,
+        exc_type: type[BaseException] | None,
+        exc_value: BaseException | None,
+        traceback: TracebackType | None,
     ) -> bool | None:
         await self._exit_stack.aclose()
         self.is_running = False
@@ -223,11 +244,22 @@ class MCPServerStdio(MCPServer):
     cwd: str | Path | None = None
     """The working directory to use when spawning the process."""
 
+    tool_prefix: str | None = None
+    """A prefix to add to all tools that are registered with the server.
+
+    If not empty, will include a trailing underscore(`_`).
+
+    e.g. if `tool_prefix='foo'`, then a tool named `bar` will be registered as `foo_bar`
+    """
+
     @asynccontextmanager
     async def client_streams(
         self,
     ) -> AsyncIterator[
-        tuple[MemoryObjectReceiveStream[JSONRPCMessage | Exception], MemoryObjectSendStream[JSONRPCMessage]]
+        tuple[
+            MemoryObjectReceiveStream[JSONRPCMessage | Exception],
+            MemoryObjectSendStream[JSONRPCMessage],
+        ]
     ]:
         server = StdioServerParameters(command=self.command, args=list(self.args), env=self.env, cwd=self.cwd)
         async with stdio_client(server=server) as (read_stream, write_stream):
@@ -236,6 +268,9 @@
     def _get_log_level(self) -> LoggingLevel | None:
         return self.log_level
 
+    def __repr__(self) -> str:
+        return f'MCPServerStdio(command={self.command!r}, args={self.args!r}, tool_prefix={self.tool_prefix!r})'
+
 
 @dataclass
 class MCPServerHTTP(MCPServer):
@@ -303,16 +338,33 @@ class MCPServerHTTP(MCPServer):
     If `None`, no log level will be set.
     """
 
+    tool_prefix: str | None = None
+    """A prefix to add to all tools that are registered with the server.
+
+    If not empty, will include a trailing underscore (`_`).
+
+    For example, if `tool_prefix='foo'`, then a tool named `bar` will be registered as `foo_bar`
+    """
+
     @asynccontextmanager
     async def client_streams(
         self,
     ) -> AsyncIterator[
-        tuple[MemoryObjectReceiveStream[JSONRPCMessage | Exception], MemoryObjectSendStream[JSONRPCMessage]]
+        tuple[
+            MemoryObjectReceiveStream[JSONRPCMessage | Exception],
+            MemoryObjectSendStream[JSONRPCMessage],
+        ]
     ]:  # pragma: no cover
         async with sse_client(
-            url=self.url, headers=self.headers, timeout=self.timeout, sse_read_timeout=self.sse_read_timeout
+            url=self.url,
+            headers=self.headers,
+            timeout=self.timeout,
+            sse_read_timeout=self.sse_read_timeout,
         ) as (read_stream, write_stream):
             yield read_stream, write_stream
 
     def _get_log_level(self) -> LoggingLevel | None:
         return self.log_level
+
+    def __repr__(self) -> str:  # pragma: no cover
+        return f'MCPServerHTTP(url={self.url!r}, tool_prefix={self.tool_prefix!r})'