pydantic-ai-slim 0.2.19__tar.gz → 0.3.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/PKG-INFO +7 -7
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/_agent_graph.py +50 -10
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/_function_schema.py +12 -3
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/_parts_manager.py +74 -2
- pydantic_ai_slim-0.3.0/pydantic_ai/_thinking_part.py +36 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/agent.py +3 -3
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/mcp.py +66 -5
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/messages.py +84 -3
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/models/anthropic.py +53 -9
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/models/bedrock.py +23 -4
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/models/cohere.py +9 -1
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/models/function.py +5 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/models/gemini.py +33 -8
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/models/google.py +27 -5
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/models/groq.py +13 -2
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/models/instrumented.py +1 -1
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/models/mistral.py +9 -1
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/models/openai.py +84 -5
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/models/test.py +9 -6
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pyproject.toml +3 -3
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/.gitignore +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/LICENSE +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/README.md +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/__main__.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/_a2a.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/_cli.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/_output.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/_utils.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/direct.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/ext/__init__.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/ext/langchain.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/format_as_xml.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/format_prompt.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/models/__init__.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/models/fallback.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/profiles/__init__.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/profiles/_json_schema.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/profiles/amazon.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/profiles/anthropic.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/profiles/cohere.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/profiles/deepseek.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/profiles/google.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/profiles/grok.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/profiles/meta.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/profiles/mistral.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/profiles/openai.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/profiles/qwen.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/providers/__init__.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/providers/anthropic.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/providers/azure.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/providers/cohere.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/providers/fireworks.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/providers/google.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/providers/grok.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/providers/groq.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/providers/heroku.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/providers/mistral.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/providers/openai.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/providers/openrouter.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/providers/together.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/result.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/tools.py +0 -0
- {pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/usage.py +0 -0
{pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.2.19
+Version: 0.3.0
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
 License-Expression: MIT
@@ -30,15 +30,15 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.2.19
+Requires-Dist: pydantic-graph==0.3.0
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
-Requires-Dist: fasta2a==0.2.19; extra == 'a2a'
+Requires-Dist: fasta2a==0.3.0; extra == 'a2a'
 Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.52.0; extra == 'anthropic'
 Provides-Extra: bedrock
-Requires-Dist: boto3>=1.
+Requires-Dist: boto3>=1.37.24; extra == 'bedrock'
 Provides-Extra: cli
 Requires-Dist: argcomplete>=3.5.0; extra == 'cli'
 Requires-Dist: prompt-toolkit>=3; extra == 'cli'
@@ -48,11 +48,11 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == 'cohere'
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.2.19; extra == 'evals'
+Requires-Dist: pydantic-evals==0.3.0; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.15.0; extra == 'google'
 Provides-Extra: groq
-Requires-Dist: groq>=0.
+Requires-Dist: groq>=0.19.0; extra == 'groq'
 Provides-Extra: logfire
 Requires-Dist: logfire>=3.11.0; extra == 'logfire'
 Provides-Extra: mcp
@@ -60,7 +60,7 @@ Requires-Dist: mcp>=1.9.4; (python_version >= '3.10') and extra == 'mcp'
 Provides-Extra: mistral
 Requires-Dist: mistralai>=1.2.5; extra == 'mistral'
 Provides-Extra: openai
-Requires-Dist: openai>=1.
+Requires-Dist: openai>=1.76.0; extra == 'openai'
 Provides-Extra: tavily
 Requires-Dist: tavily-python>=0.5.0; extra == 'tavily'
 Provides-Extra: vertexai
```
{pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/_agent_graph.py

```diff
@@ -12,6 +12,7 @@ from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, Union, cast
 from opentelemetry.trace import Tracer
 from typing_extensions import TypeGuard, TypeVar, assert_never
 
+from pydantic_ai._function_schema import _takes_ctx as is_takes_ctx  # type: ignore
 from pydantic_ai._utils import is_async_callable, run_in_executor
 from pydantic_graph import BaseNode, Graph, GraphRunContext
 from pydantic_graph.nodes import End, NodeRunEndT
@@ -50,8 +51,20 @@ OutputT = TypeVar('OutputT')
 
 _HistoryProcessorSync = Callable[[list[_messages.ModelMessage]], list[_messages.ModelMessage]]
 _HistoryProcessorAsync = Callable[[list[_messages.ModelMessage]], Awaitable[list[_messages.ModelMessage]]]
-HistoryProcessor = Union[_HistoryProcessorSync, _HistoryProcessorAsync]
-"""A function that processes a list of model messages and returns a list of model messages."""
+_HistoryProcessorSyncWithCtx = Callable[[RunContext[DepsT], list[_messages.ModelMessage]], list[_messages.ModelMessage]]
+_HistoryProcessorAsyncWithCtx = Callable[
+    [RunContext[DepsT], list[_messages.ModelMessage]], Awaitable[list[_messages.ModelMessage]]
+]
+HistoryProcessor = Union[
+    _HistoryProcessorSync,
+    _HistoryProcessorAsync,
+    _HistoryProcessorSyncWithCtx[DepsT],
+    _HistoryProcessorAsyncWithCtx[DepsT],
+]
+"""A function that processes a list of model messages and returns a list of model messages.
+
+Can optionally accept a `RunContext` as a parameter.
+"""
 
 
 @dataclasses.dataclass
@@ -92,7 +105,7 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]):
     output_schema: _output.OutputSchema[OutputDataT] | None
     output_validators: list[_output.OutputValidator[DepsT, OutputDataT]]
 
-    history_processors: Sequence[HistoryProcessor]
+    history_processors: Sequence[HistoryProcessor[DepsT]]
 
     function_tools: dict[str, Tool[DepsT]] = dataclasses.field(repr=False)
     mcp_servers: Sequence[MCPServer] = dataclasses.field(repr=False)
@@ -328,7 +341,9 @@ class ModelRequestNode(AgentNode[DepsT, NodeRunEndT]):
 
         model_settings, model_request_parameters = await self._prepare_request(ctx)
         model_request_parameters = ctx.deps.model.customize_request_parameters(model_request_parameters)
-        message_history = await _process_message_history(ctx.state.message_history, ctx.deps.history_processors)
+        message_history = await _process_message_history(
+            ctx.state.message_history, ctx.deps.history_processors, build_run_context(ctx)
+        )
         async with ctx.deps.model.request_stream(
             message_history, model_settings, model_request_parameters
         ) as streamed_response:
@@ -352,7 +367,9 @@ class ModelRequestNode(AgentNode[DepsT, NodeRunEndT]):
 
         model_settings, model_request_parameters = await self._prepare_request(ctx)
         model_request_parameters = ctx.deps.model.customize_request_parameters(model_request_parameters)
-        message_history = await _process_message_history(ctx.state.message_history, ctx.deps.history_processors)
+        message_history = await _process_message_history(
+            ctx.state.message_history, ctx.deps.history_processors, build_run_context(ctx)
+        )
         model_response = await ctx.deps.model.request(message_history, model_settings, model_request_parameters)
         ctx.state.usage.incr(_usage.Usage())
 
@@ -425,7 +442,7 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
             async for _event in stream:
                 pass
 
-    async def _run_stream(
+    async def _run_stream(  # noqa: C901
        self, ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]]
     ) -> AsyncIterator[_messages.HandleResponseEvent]:
         if self._events_iterator is None:
@@ -441,6 +458,12 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
                     texts.append(part.content)
                 elif isinstance(part, _messages.ToolCallPart):
                     tool_calls.append(part)
+                elif isinstance(part, _messages.ThinkingPart):
+                    # We don't need to do anything with thinking parts in this tool-calling node.
+                    # We need to handle text parts in case there are no tool calls and/or the desired output comes
+                    # from the text, but thinking parts should not directly influence the execution of tools or
+                    # determination of the next node of graph execution here.
+                    pass
                 else:
                     assert_never(part)
 
@@ -762,7 +785,12 @@ async def _tool_from_mcp_server(
         # some weird edge case occurs.
         if not server.is_running:  # pragma: no cover
             raise exceptions.UserError(f'MCP server is not running: {server}')
-        result = await server.call_tool(tool_name, args)
+
+        if server.process_tool_call is not None:
+            result = await server.process_tool_call(ctx, server.call_tool, tool_name, args)
+        else:
+            result = await server.call_tool(tool_name, args)
+
         return result
 
     for server in ctx.deps.mcp_servers:
@@ -876,12 +904,24 @@ def build_agent_graph(
 
 async def _process_message_history(
     messages: list[_messages.ModelMessage],
-    processors: Sequence[HistoryProcessor],
+    processors: Sequence[HistoryProcessor[DepsT]],
+    run_context: RunContext[DepsT],
 ) -> list[_messages.ModelMessage]:
     """Process message history through a sequence of processors."""
     for processor in processors:
+        takes_ctx = is_takes_ctx(processor)
+
         if is_async_callable(processor):
-            messages = await processor(messages)
+            if takes_ctx:
+                messages = await processor(run_context, messages)
+            else:
+                async_processor = cast(_HistoryProcessorAsync, processor)
+                messages = await async_processor(messages)
         else:
-            messages = await run_in_executor(processor, messages)
+            if takes_ctx:
+                sync_processor_with_ctx = cast(_HistoryProcessorSyncWithCtx[DepsT], processor)
+                messages = await run_in_executor(sync_processor_with_ctx, run_context, messages)
+            else:
+                sync_processor = cast(_HistoryProcessorSync, processor)
+                messages = await run_in_executor(sync_processor, messages)
     return messages
```
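For reference, the four processor shapes the new `HistoryProcessor` union accepts look like this in user code. A minimal sketch; the function names and the trimming logic are illustrative, not part of the package:

```python
from pydantic_ai import RunContext
from pydantic_ai.messages import ModelMessage


def keep_recent(messages: list[ModelMessage]) -> list[ModelMessage]:
    """Sync, no context."""
    return messages[-5:]


async def keep_recent_async(messages: list[ModelMessage]) -> list[ModelMessage]:
    """Async, no context."""
    return messages[-5:]


def keep_recent_with_ctx(ctx: RunContext[None], messages: list[ModelMessage]) -> list[ModelMessage]:
    """Sync, with RunContext: run state such as ctx.deps and ctx.usage is available."""
    return messages[-5:]


async def keep_recent_with_ctx_async(ctx: RunContext[None], messages: list[ModelMessage]) -> list[ModelMessage]:
    """Async, with RunContext."""
    return messages[-5:]
```

`_process_message_history` uses `is_takes_ctx` and `is_async_callable` to pick the right invocation for each shape, so all four can be mixed freely in a single `history_processors` sequence.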
{pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/_function_schema.py

```diff
@@ -8,7 +8,7 @@ from __future__ import annotations as _annotations
 from collections.abc import Awaitable
 from dataclasses import dataclass, field
 from inspect import Parameter, signature
-from typing import TYPE_CHECKING, Any, Callable, cast
+from typing import TYPE_CHECKING, Any, Callable, Union, cast
 
 from pydantic import ConfigDict
 from pydantic._internal import _decorators, _generate_schema, _typing_extra
@@ -17,7 +17,7 @@ from pydantic.fields import FieldInfo
 from pydantic.json_schema import GenerateJsonSchema
 from pydantic.plugin._schema_validator import create_schema_validator
 from pydantic_core import SchemaValidator, core_schema
-from typing_extensions import get_origin
+from typing_extensions import Concatenate, ParamSpec, TypeIs, TypeVar, get_origin
 
 from pydantic_ai.tools import RunContext
 
@@ -218,7 +218,16 @@ def function_schema(  # noqa: C901
     )
 
 
-def _takes_ctx(function: Callable[..., Any]) -> bool:
+P = ParamSpec('P')
+R = TypeVar('R')
+
+
+WithCtx = Callable[Concatenate[RunContext[Any], P], R]
+WithoutCtx = Callable[P, R]
+TargetFunc = Union[WithCtx[P, R], WithoutCtx[P, R]]
+
+
+def _takes_ctx(function: TargetFunc[P, R]) -> TypeIs[WithCtx[P, R]]:
     """Check if a function takes a `RunContext` first argument.
 
     Args:
```
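Returning `TypeIs[WithCtx[P, R]]` rather than `bool` lets static type checkers narrow a callable to the ctx-taking form at the call site. A self-contained sketch of the pattern, with a simplified annotation check standing in for pydantic-ai's real signature inspection:

```python
from inspect import signature
from typing import Callable, Union

from typing_extensions import Concatenate, ParamSpec, TypeIs, TypeVar

P = ParamSpec('P')
R = TypeVar('R')


class RunContext:
    """Stand-in for pydantic_ai.tools.RunContext."""


WithCtx = Callable[Concatenate[RunContext, P], R]
WithoutCtx = Callable[P, R]
TargetFunc = Union[WithCtx[P, R], WithoutCtx[P, R]]


def takes_ctx(function: TargetFunc[P, R]) -> TypeIs[WithCtx[P, R]]:
    """Simplified check: is the first parameter annotated as RunContext?"""
    params = list(signature(function).parameters.values())
    return bool(params) and params[0].annotation is RunContext


def greet(ctx: RunContext, name: str) -> str:
    return f'hello {name}'


if takes_ctx(greet):
    # In the True branch a type checker treats `greet` as WithCtx, so passing a
    # RunContext first argument type-checks without a cast.
    print(greet(RunContext(), 'world'))
```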
{pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/_parts_manager.py

```diff
@@ -25,6 +25,8 @@ from pydantic_ai.messages import (
     PartStartEvent,
     TextPart,
     TextPartDelta,
+    ThinkingPart,
+    ThinkingPartDelta,
     ToolCallPart,
     ToolCallPartDelta,
 )
@@ -86,8 +88,7 @@ class ModelResponsePartsManager:
             A `PartStartEvent` if a new part was created, or a `PartDeltaEvent` if an existing part was updated.
 
         Raises:
-            UnexpectedModelBehavior: If attempting to apply text content to a part that is
-                not a TextPart.
+            UnexpectedModelBehavior: If attempting to apply text content to a part that is not a TextPart.
         """
         existing_text_part_and_index: tuple[TextPart, int] | None = None
 
@@ -122,6 +123,77 @@ class ModelResponsePartsManager:
             self._parts[part_index] = part_delta.apply(existing_text_part)
             return PartDeltaEvent(index=part_index, delta=part_delta)
 
+    def handle_thinking_delta(
+        self,
+        *,
+        vendor_part_id: Hashable | None,
+        content: str | None = None,
+        signature: str | None = None,
+    ) -> ModelResponseStreamEvent:
+        """Handle incoming thinking content, creating or updating a ThinkingPart in the manager as appropriate.
+
+        When `vendor_part_id` is None, the latest part is updated if it exists and is a ThinkingPart;
+        otherwise, a new ThinkingPart is created. When a non-None ID is specified, the ThinkingPart corresponding
+        to that vendor ID is either created or updated.
+
+        Args:
+            vendor_part_id: The ID the vendor uses to identify this piece
+                of thinking. If None, a new part will be created unless the latest part is already
+                a ThinkingPart.
+            content: The thinking content to append to the appropriate ThinkingPart.
+            signature: An optional signature for the thinking content.
+
+        Returns:
+            A `PartStartEvent` if a new part was created, or a `PartDeltaEvent` if an existing part was updated.
+
+        Raises:
+            UnexpectedModelBehavior: If attempting to apply a thinking delta to a part that is not a ThinkingPart.
+        """
+        existing_thinking_part_and_index: tuple[ThinkingPart, int] | None = None
+
+        if vendor_part_id is None:
+            # If the vendor_part_id is None, check if the latest part is a ThinkingPart to update
+            if self._parts:
+                part_index = len(self._parts) - 1
+                latest_part = self._parts[part_index]
+                if isinstance(latest_part, ThinkingPart):  # pragma: no branch
+                    existing_thinking_part_and_index = latest_part, part_index
+        else:
+            # Otherwise, attempt to look up an existing ThinkingPart by vendor_part_id
+            part_index = self._vendor_id_to_part_index.get(vendor_part_id)
+            if part_index is not None:
+                existing_part = self._parts[part_index]
+                if not isinstance(existing_part, ThinkingPart):
+                    raise UnexpectedModelBehavior(f'Cannot apply a thinking delta to {existing_part=}')
+                existing_thinking_part_and_index = existing_part, part_index
+
+        if existing_thinking_part_and_index is None:
+            if content is not None:
+                # There is no existing thinking part that should be updated, so create a new one
+                new_part_index = len(self._parts)
+                part = ThinkingPart(content=content, signature=signature)
+                if vendor_part_id is not None:  # pragma: no branch
+                    self._vendor_id_to_part_index[vendor_part_id] = new_part_index
+                self._parts.append(part)
+                return PartStartEvent(index=new_part_index, part=part)
+            else:
+                raise UnexpectedModelBehavior('Cannot create a ThinkingPart with no content')
+        else:
+            if content is not None:
+                # Update the existing ThinkingPart with the new content delta
+                existing_thinking_part, part_index = existing_thinking_part_and_index
+                part_delta = ThinkingPartDelta(content_delta=content)
+                self._parts[part_index] = part_delta.apply(existing_thinking_part)
+                return PartDeltaEvent(index=part_index, delta=part_delta)
+            elif signature is not None:
+                # Update the existing ThinkingPart with the new signature delta
+                existing_thinking_part, part_index = existing_thinking_part_and_index
+                part_delta = ThinkingPartDelta(signature_delta=signature)
+                self._parts[part_index] = part_delta.apply(existing_thinking_part)
+                return PartDeltaEvent(index=part_index, delta=part_delta)
+            else:
+                raise UnexpectedModelBehavior('Cannot update a ThinkingPart with no content or signature')
+
     def handle_tool_call_delta(
         self,
         *,
```
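A sketch of how a streaming model adapter might drive the new method; the vendor part ID and content are made up, and `_parts_manager` is a private module, so treat this as illustration rather than stable API:

```python
from pydantic_ai._parts_manager import ModelResponsePartsManager

manager = ModelResponsePartsManager()

# The first delta for a vendor part ID creates a ThinkingPart and yields a PartStartEvent.
event = manager.handle_thinking_delta(vendor_part_id='thinking-0', content='Let me think')
print(event)  # roughly: PartStartEvent(index=0, part=ThinkingPart(content='Let me think'))

# Later deltas with the same ID append content and yield PartDeltaEvents.
event = manager.handle_thinking_delta(vendor_part_id='thinking-0', content=' about this.')
print(event)  # roughly: PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' about this.'))
```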
pydantic_ai_slim-0.3.0/pydantic_ai/_thinking_part.py

```diff
@@ -0,0 +1,36 @@
+from __future__ import annotations as _annotations
+
+from pydantic_ai.messages import TextPart, ThinkingPart
+
+START_THINK_TAG = '<think>'
+END_THINK_TAG = '</think>'
+
+
+def split_content_into_text_and_thinking(content: str) -> list[ThinkingPart | TextPart]:
+    """Split a string into text and thinking parts.
+
+    Some models don't return the thinking part as a separate part, but rather as a tag in the content.
+    This function splits the content into text and thinking parts.
+
+    We use the `<think>` tag because that's how Groq uses it in the `raw` format, so instead of using `<Thinking>` or
+    something else, we just match the tag to make it easier for other models that don't support the `ThinkingPart`.
+    """
+    parts: list[ThinkingPart | TextPart] = []
+
+    start_index = content.find(START_THINK_TAG)
+    while start_index >= 0:
+        before_think, content = content[:start_index], content[start_index + len(START_THINK_TAG) :]
+        if before_think:
+            parts.append(TextPart(content=before_think))
+        end_index = content.find(END_THINK_TAG)
+        if end_index >= 0:
+            think_content, content = content[:end_index], content[end_index + len(END_THINK_TAG) :]
+            parts.append(ThinkingPart(content=think_content))
+        else:
+            # We lose the `<think>` tag, but it shouldn't matter.
+            parts.append(TextPart(content=content))
+            content = ''
+        start_index = content.find(START_THINK_TAG)
+    if content:
+        parts.append(TextPart(content=content))
+    return parts
```
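A usage sketch for the new helper (again a private module, so the import path is not a public guarantee):

```python
from pydantic_ai._thinking_part import split_content_into_text_and_thinking

parts = split_content_into_text_and_thinking('<think>plan the answer</think>The answer is 4.')
# -> [ThinkingPart(content='plan the answer'), TextPart(content='The answer is 4.')]
```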
{pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/agent.py

```diff
@@ -180,7 +180,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         defer_model_check: bool = False,
         end_strategy: EndStrategy = 'early',
         instrument: InstrumentationSettings | bool | None = None,
-        history_processors: Sequence[HistoryProcessor] | None = None,
+        history_processors: Sequence[HistoryProcessor[AgentDepsT]] | None = None,
     ) -> None: ...
 
     @overload
@@ -210,7 +210,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         defer_model_check: bool = False,
         end_strategy: EndStrategy = 'early',
         instrument: InstrumentationSettings | bool | None = None,
-        history_processors: Sequence[HistoryProcessor] | None = None,
+        history_processors: Sequence[HistoryProcessor[AgentDepsT]] | None = None,
     ) -> None: ...
 
     def __init__(
@@ -235,7 +235,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         defer_model_check: bool = False,
         end_strategy: EndStrategy = 'early',
         instrument: InstrumentationSettings | bool | None = None,
-        history_processors: Sequence[HistoryProcessor] | None = None,
+        history_processors: Sequence[HistoryProcessor[AgentDepsT]] | None = None,
         **_deprecated_kwargs: Any,
     ):
         """Create an agent.
```
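With the parameter now generic in `AgentDepsT`, a processor can be typed against the agent's deps. A usage sketch; the model name, deps type, and trimming policy are placeholders:

```python
from dataclasses import dataclass

from pydantic_ai import Agent, RunContext
from pydantic_ai.messages import ModelMessage


@dataclass
class Deps:
    max_history: int


def trim_history(ctx: RunContext[Deps], messages: list[ModelMessage]) -> list[ModelMessage]:
    # The processor can now read run state, e.g. deps, before each model request.
    return messages[-ctx.deps.max_history :]


agent = Agent('openai:gpt-4o', deps_type=Deps, history_processors=[trim_history])
```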
{pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/mcp.py

```diff
@@ -4,7 +4,7 @@ import base64
 import functools
 import json
 from abc import ABC, abstractmethod
-from collections.abc import AsyncIterator, Sequence
+from collections.abc import AsyncIterator, Awaitable, Sequence
 from contextlib import AbstractAsyncContextManager, AsyncExitStack, asynccontextmanager
 from dataclasses import dataclass
 from pathlib import Path
@@ -15,14 +15,20 @@ import anyio
 import httpx
 from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
 from mcp.client.streamable_http import GetSessionIdCallback, streamablehttp_client
+from mcp.shared.exceptions import McpError
 from mcp.shared.message import SessionMessage
 from mcp.types import (
     AudioContent,
     BlobResourceContents,
+    CallToolRequest,
+    CallToolRequestParams,
+    CallToolResult,
+    ClientRequest,
     Content,
     EmbeddedResource,
     ImageContent,
     LoggingLevel,
+    RequestParams,
     TextContent,
     TextResourceContents,
 )
@@ -30,7 +36,7 @@ from typing_extensions import Self, assert_never, deprecated
 
 from pydantic_ai.exceptions import ModelRetry
 from pydantic_ai.messages import BinaryContent
-from pydantic_ai.tools import ToolDefinition
+from pydantic_ai.tools import RunContext, ToolDefinition
 
 try:
     from mcp.client.session import ClientSession
@@ -60,6 +66,9 @@ class MCPServer(ABC):
     e.g. if `tool_prefix='foo'`, then a tool named `bar` will be registered as `foo_bar`
     """
 
+    process_tool_call: ProcessToolCallback | None = None
+    """Hook to customize tool calling and optionally pass extra metadata."""
+
     _client: ClientSession
     _read_stream: MemoryObjectReceiveStream[SessionMessage | Exception]
     _write_stream: MemoryObjectSendStream[SessionMessage]
@@ -113,13 +122,17 @@ class MCPServer(ABC):
     ]
 
     async def call_tool(
-        self, tool_name: str, arguments: dict[str, Any]
-    ) -> str | BinaryContent | dict[str, Any] | list[Any] | Sequence[str | BinaryContent | dict[str, Any] | list[Any]]:
+        self,
+        tool_name: str,
+        arguments: dict[str, Any],
+        metadata: dict[str, Any] | None = None,
+    ) -> ToolResult:
         """Call a tool on the server.
 
         Args:
             tool_name: The name of the tool to call.
             arguments: The arguments to pass to the tool.
+            metadata: Request-level metadata (optional)
 
         Returns:
             The result of the tool call.
@@ -127,7 +140,23 @@ class MCPServer(ABC):
         Raises:
             ModelRetry: If the tool call fails.
         """
-        result = await self._client.call_tool(self.get_unprefixed_tool_name(tool_name), arguments)
+        try:
+            # meta param is not provided by session yet, so build and can send_request directly.
+            result = await self._client.send_request(
+                ClientRequest(
+                    CallToolRequest(
+                        method='tools/call',
+                        params=CallToolRequestParams(
+                            name=self.get_unprefixed_tool_name(tool_name),
+                            arguments=arguments,
+                            _meta=RequestParams.Meta(**metadata) if metadata else None,
+                        ),
+                    )
+                ),
+                CallToolResult,
+            )
+        except McpError as e:
+            raise ModelRetry(e.error.message)
 
         content = [self._map_tool_result_part(part) for part in result.content]
 
@@ -265,6 +294,9 @@ class MCPServerStdio(MCPServer):
     e.g. if `tool_prefix='foo'`, then a tool named `bar` will be registered as `foo_bar`
     """
 
+    process_tool_call: ProcessToolCallback | None = None
+    """Hook to customize tool calling and optionally pass extra metadata."""
+
     timeout: float = 5
     """ The timeout in seconds to wait for the client to initialize."""
 
@@ -359,6 +391,9 @@ class _MCPServerHTTP(MCPServer):
     For example, if `tool_prefix='foo'`, then a tool named `bar` will be registered as `foo_bar`
     """
 
+    process_tool_call: ProcessToolCallback | None = None
+    """Hook to customize tool calling and optionally pass extra metadata."""
+
     @property
     @abstractmethod
     def _transport_client(
@@ -517,3 +552,29 @@ class MCPServerStreamableHTTP(_MCPServerHTTP):
     @property
     def _transport_client(self):
         return streamablehttp_client  # pragma: no cover
+
+
+ToolResult = (
+    str | BinaryContent | dict[str, Any] | list[Any] | Sequence[str | BinaryContent | dict[str, Any] | list[Any]]
+)
+"""The result type of a tool call."""
+
+CallToolFunc = Callable[[str, dict[str, Any], dict[str, Any] | None], Awaitable[ToolResult]]
+"""A function type that represents a tool call."""
+
+ProcessToolCallback = Callable[
+    [
+        RunContext[Any],
+        CallToolFunc,
+        str,
+        dict[str, Any],
+    ],
+    Awaitable[ToolResult],
+]
+"""A process tool callback.
+
+It accepts a run context, the original tool call function, a tool name, and arguments.
+
+Allows wrapping an MCP server tool call to customize it, including adding extra request
+metadata.
+"""
```
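A sketch of a `process_tool_call` hook that forwards extra metadata with every tool call; the server command and the metadata payload are illustrative:

```python
from typing import Any

from pydantic_ai.mcp import CallToolFunc, MCPServerStdio, ToolResult
from pydantic_ai.tools import RunContext


async def process_tool_call(
    ctx: RunContext[Any],
    call_tool: CallToolFunc,
    tool_name: str,
    args: dict[str, Any],
) -> ToolResult:
    """Wrap the original call, attaching MCP `_meta` via the new metadata parameter."""
    return await call_tool(tool_name, args, {'deps': ctx.deps})


server = MCPServerStdio('python', ['-m', 'my_mcp_server'], process_tool_call=process_tool_call)
```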
{pydantic_ai_slim-0.2.19 → pydantic_ai_slim-0.3.0}/pydantic_ai/messages.py

```diff
@@ -14,7 +14,10 @@ from opentelemetry._events import Event  # pyright: ignore[reportPrivateImportUsage]
 from typing_extensions import TypeAlias
 
 from . import _utils
-from ._utils import generate_tool_call_id as _generate_tool_call_id, now_utc as _now_utc
+from ._utils import (
+    generate_tool_call_id as _generate_tool_call_id,
+    now_utc as _now_utc,
+)
 from .exceptions import UnexpectedModelBehavior
 from .usage import Usage
 
@@ -531,6 +534,32 @@ class TextPart:
     __repr__ = _utils.dataclasses_no_defaults_repr
 
 
+@dataclass(repr=False)
+class ThinkingPart:
+    """A thinking response from a model."""
+
+    content: str
+    """The thinking content of the response."""
+
+    id: str | None = None
+    """The identifier of the thinking part."""
+
+    signature: str | None = None
+    """The signature of the thinking.
+
+    The signature is only available on the Anthropic models.
+    """
+
+    part_kind: Literal['thinking'] = 'thinking'
+    """Part type identifier, this is available on all parts as a discriminator."""
+
+    def has_content(self) -> bool:
+        """Return `True` if the thinking content is non-empty."""
+        return bool(self.content)  # pragma: no cover
+
+    __repr__ = _utils.dataclasses_no_defaults_repr
+
+
 @dataclass(repr=False)
 class ToolCallPart:
     """A tool call from a model."""
@@ -589,7 +618,7 @@ class ToolCallPart:
     __repr__ = _utils.dataclasses_no_defaults_repr
 
 
-ModelResponsePart = Annotated[Union[TextPart, ToolCallPart], pydantic.Discriminator('part_kind')]
+ModelResponsePart = Annotated[Union[TextPart, ToolCallPart, ThinkingPart], pydantic.Discriminator('part_kind')]
 """A message part returned by a model."""
 
 
@@ -699,6 +728,56 @@ class TextPartDelta:
     __repr__ = _utils.dataclasses_no_defaults_repr
 
 
+@dataclass(repr=False)
+class ThinkingPartDelta:
+    """A partial update (delta) for a `ThinkingPart` to append new thinking content."""
+
+    content_delta: str | None = None
+    """The incremental thinking content to add to the existing `ThinkingPart` content."""
+
+    signature_delta: str | None = None
+    """Optional signature delta.
+
+    Note this is never treated as a delta — it can replace None.
+    """
+
+    part_delta_kind: Literal['thinking'] = 'thinking'
+    """Part delta type identifier, used as a discriminator."""
+
+    @overload
+    def apply(self, part: ModelResponsePart) -> ThinkingPart: ...
+
+    @overload
+    def apply(self, part: ModelResponsePart | ThinkingPartDelta) -> ThinkingPart | ThinkingPartDelta: ...
+
+    def apply(self, part: ModelResponsePart | ThinkingPartDelta) -> ThinkingPart | ThinkingPartDelta:
+        """Apply this thinking delta to an existing `ThinkingPart`.
+
+        Args:
+            part: The existing model response part, which must be a `ThinkingPart`.
+
+        Returns:
+            A new `ThinkingPart` with updated thinking content.
+
+        Raises:
+            ValueError: If `part` is not a `ThinkingPart`.
+        """
+        if isinstance(part, ThinkingPart):
+            return replace(part, content=part.content + self.content_delta if self.content_delta else None)
+        elif isinstance(part, ThinkingPartDelta):
+            if self.content_delta is None and self.signature_delta is None:
+                raise ValueError('Cannot apply ThinkingPartDelta with no content or signature')
+            if self.signature_delta is not None:
+                return replace(part, signature_delta=self.signature_delta)
+            if self.content_delta is not None:
+                return replace(part, content_delta=self.content_delta)
+        raise ValueError(  # pragma: no cover
+            f'Cannot apply ThinkingPartDeltas to non-ThinkingParts or non-ThinkingPartDeltas ({part=}, {self=})'
+        )
+
+    __repr__ = _utils.dataclasses_no_defaults_repr
+
+
 @dataclass(repr=False)
 class ToolCallPartDelta:
     """A partial update (delta) for a `ToolCallPart` to modify tool name, arguments, or tool call ID."""
@@ -818,7 +897,9 @@ class ToolCallPartDelta:
     __repr__ = _utils.dataclasses_no_defaults_repr
 
 
-ModelResponsePartDelta = Annotated[Union[TextPartDelta, ToolCallPartDelta], pydantic.Discriminator('part_delta_kind')]
+ModelResponsePartDelta = Annotated[
+    Union[TextPartDelta, ThinkingPartDelta, ToolCallPartDelta], pydantic.Discriminator('part_delta_kind')
+]
 """A partial update (delta) for any model response part."""
 
 
```
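A small sketch exercising the new part and delta types; the content strings are made up:

```python
from pydantic_ai.messages import ModelResponse, TextPart, ThinkingPart, ThinkingPartDelta

# ThinkingPart is now a valid ModelResponsePart alongside TextPart and ToolCallPart.
response = ModelResponse(
    parts=[
        ThinkingPart(content='Weigh both options first.'),
        TextPart(content='Option A is better.'),
    ]
)

# A content delta appends to an existing ThinkingPart; a signature delta replaces
# the signature rather than appending.
part = ThinkingPart(content='Weigh both options')
part = ThinkingPartDelta(content_delta=' first.').apply(part)
assert part.content == 'Weigh both options first.'
```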