pydantic-ai-slim 1.0.13__py3-none-any.whl → 1.0.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pydantic-ai-slim might be problematic.
- pydantic_ai/__init__.py +19 -1
- pydantic_ai/_agent_graph.py +118 -97
- pydantic_ai/_cli.py +4 -7
- pydantic_ai/_output.py +236 -192
- pydantic_ai/_parts_manager.py +8 -42
- pydantic_ai/_tool_manager.py +9 -16
- pydantic_ai/agent/abstract.py +169 -1
- pydantic_ai/builtin_tools.py +82 -0
- pydantic_ai/direct.py +7 -0
- pydantic_ai/durable_exec/dbos/_agent.py +106 -3
- pydantic_ai/durable_exec/temporal/_agent.py +123 -6
- pydantic_ai/durable_exec/temporal/_model.py +8 -0
- pydantic_ai/format_prompt.py +4 -3
- pydantic_ai/mcp.py +20 -10
- pydantic_ai/messages.py +149 -3
- pydantic_ai/models/__init__.py +15 -1
- pydantic_ai/models/anthropic.py +7 -3
- pydantic_ai/models/cohere.py +4 -0
- pydantic_ai/models/function.py +7 -4
- pydantic_ai/models/gemini.py +8 -0
- pydantic_ai/models/google.py +56 -23
- pydantic_ai/models/groq.py +11 -5
- pydantic_ai/models/huggingface.py +5 -3
- pydantic_ai/models/mistral.py +6 -8
- pydantic_ai/models/openai.py +197 -58
- pydantic_ai/models/test.py +4 -0
- pydantic_ai/output.py +5 -2
- pydantic_ai/profiles/__init__.py +2 -0
- pydantic_ai/profiles/google.py +5 -2
- pydantic_ai/profiles/openai.py +2 -1
- pydantic_ai/result.py +46 -30
- pydantic_ai/run.py +35 -7
- pydantic_ai/usage.py +5 -4
- {pydantic_ai_slim-1.0.13.dist-info → pydantic_ai_slim-1.0.15.dist-info}/METADATA +3 -3
- {pydantic_ai_slim-1.0.13.dist-info → pydantic_ai_slim-1.0.15.dist-info}/RECORD +38 -38
- {pydantic_ai_slim-1.0.13.dist-info → pydantic_ai_slim-1.0.15.dist-info}/WHEEL +0 -0
- {pydantic_ai_slim-1.0.13.dist-info → pydantic_ai_slim-1.0.15.dist-info}/entry_points.txt +0 -0
- {pydantic_ai_slim-1.0.13.dist-info → pydantic_ai_slim-1.0.15.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/__init__.py
CHANGED
@@ -9,7 +9,14 @@ from .agent import (
     UserPromptNode,
     capture_run_messages,
 )
-from .builtin_tools import CodeExecutionTool, UrlContextTool, WebSearchTool, WebSearchUserLocation
+from .builtin_tools import (
+    CodeExecutionTool,
+    ImageGenerationTool,
+    MemoryTool,
+    UrlContextTool,
+    WebSearchTool,
+    WebSearchUserLocation,
+)
 from .exceptions import (
     AgentRunError,
     ApprovalRequired,
@@ -30,11 +37,13 @@ from .messages import (
     BaseToolCallPart,
     BaseToolReturnPart,
     BinaryContent,
+    BinaryImage,
     BuiltinToolCallPart,
     BuiltinToolReturnPart,
     DocumentFormat,
     DocumentMediaType,
     DocumentUrl,
+    FilePart,
     FileUrl,
     FinalResultEvent,
     FinishReason,
@@ -79,6 +88,7 @@ from .profiles import (
     ModelProfile,
     ModelProfileSpec,
 )
+from .run import AgentRun, AgentRunResult, AgentRunResultEvent
 from .settings import ModelSettings
 from .tools import DeferredToolRequests, DeferredToolResults, RunContext, Tool, ToolApproved, ToolDefinition, ToolDenied
 from .toolsets import (
@@ -131,6 +141,7 @@ __all__ = (
     'DocumentMediaType',
     'DocumentUrl',
     'FileUrl',
+    'FilePart',
     'FinalResultEvent',
     'FinishReason',
     'FunctionToolCallEvent',
@@ -139,6 +150,7 @@ __all__ = (
     'ImageFormat',
     'ImageMediaType',
     'ImageUrl',
+    'BinaryImage',
     'ModelMessage',
     'ModelMessagesTypeAdapter',
     'ModelRequest',
@@ -197,6 +209,8 @@ __all__ = (
     'WebSearchUserLocation',
     'UrlContextTool',
     'CodeExecutionTool',
+    'ImageGenerationTool',
+    'MemoryTool',
     # output
     'ToolOutput',
     'NativeOutput',
@@ -211,5 +225,9 @@ __all__ = (
     'RunUsage',
     'RequestUsage',
     'UsageLimits',
+    # run
+    'AgentRun',
+    'AgentRunResult',
+    'AgentRunResultEvent',
 )
 
 __version__ = _metadata_version('pydantic_ai_slim')
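Taken together, these hunks widen the package's top-level API. A minimal sketch of importing the newly exported names (the agent wiring is illustrative, not part of the diff):

from pydantic_ai import (
    Agent,
    AgentRunResult,       # now re-exported from pydantic_ai.run
    BinaryImage,          # newly exported image content type
    FilePart,             # newly exported response file part
    ImageGenerationTool,  # newly exported built-in tool
    MemoryTool,           # newly exported built-in tool
)

# Illustrative: the new built-in tools plug into an agent like the existing ones.
agent = Agent('openai:gpt-4o', builtin_tools=[ImageGenerationTool()])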
pydantic_ai/_agent_graph.py
CHANGED
@@ -87,10 +87,10 @@ Can optionally accept a `RunContext` as a parameter.
 class GraphAgentState:
     """State kept across the execution of the agent graph."""
 
-    message_history: list[_messages.ModelMessage]
-    usage: _usage.RunUsage
-    retries: int
-    run_step: int
+    message_history: list[_messages.ModelMessage] = dataclasses.field(default_factory=list)
+    usage: _usage.RunUsage = dataclasses.field(default_factory=_usage.RunUsage)
+    retries: int = 0
+    run_step: int = 0
 
     def increment_retries(self, max_result_retries: int, error: BaseException | None = None) -> None:
         self.retries += 1
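With defaults on every field, an empty `GraphAgentState` can now be constructed directly. A minimal sketch of the behavioral difference (this is an internal class; the import path is taken from the diff):

from pydantic_ai._agent_graph import GraphAgentState  # internal module

# 1.0.13 required all four fields; with the new defaults the bare
# constructor is valid and yields an empty starting state.
state = GraphAgentState()
assert state.message_history == []
assert state.retries == 0 and state.run_step == 0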
@@ -222,7 +222,7 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
         if self.user_prompt is None:
             # Skip ModelRequestNode and go directly to CallToolsNode
             return CallToolsNode[DepsT, NodeRunEndT](last_message)
-        elif …
+        elif last_message.tool_calls:
             raise exceptions.UserError(
                 'Cannot provide a new user prompt when the message history contains unprocessed tool calls.'
             )
@@ -230,7 +230,6 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
         # Build the run context after `ctx.deps.prompt` has been updated
         run_context = build_run_context(ctx)
 
-        parts: list[_messages.ModelRequestPart] = []
         if messages:
             await self._reevaluate_dynamic_prompts(messages, run_context)
 
@@ -272,7 +271,7 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
             raise exceptions.UserError(
                 'Tool call results were provided, but the message history does not contain a `ModelResponse`.'
             )
-        if not …
+        if not last_model_response.tool_calls:
             raise exceptions.UserError(
                 'Tool call results were provided, but the message history does not contain any unprocessed tool calls.'
             )
@@ -356,9 +355,6 @@ async def _prepare_request_parameters(
     if isinstance(output_schema, _output.NativeOutputSchema):
         output_object = output_schema.object_def
 
-    # ToolOrTextOutputSchema, NativeOutputSchema, and PromptedOutputSchema all inherit from TextOutputSchema
-    allow_text_output = isinstance(output_schema, _output.TextOutputSchema)
-
     function_tools: list[ToolDefinition] = []
     output_tools: list[ToolDefinition] = []
     for tool_def in ctx.deps.tool_manager.tool_defs:
@@ -373,7 +369,8 @@ async def _prepare_request_parameters(
         output_mode=output_schema.mode,
         output_tools=output_tools,
         output_object=output_object,
-        allow_text_output=allow_text_output,
+        allow_text_output=output_schema.allows_text,
+        allow_image_output=output_schema.allows_image,
     )
 
@@ -458,15 +455,13 @@ class ModelRequestNode(AgentNode[DepsT, NodeRunEndT]):
 
         original_history = ctx.state.message_history[:]
         message_history = await _process_message_history(original_history, ctx.deps.history_processors, run_context)
-        # Never merge the new `ModelRequest` with the one preceding it, to keep `new_messages()` from accidentally including part of the existing message history
-        message_history = [*_clean_message_history(message_history[:-1]), message_history[-1]]
         # `ctx.state.message_history` is the same list used by `capture_run_messages`, so we should replace its contents, not the reference
         ctx.state.message_history[:] = message_history
         # Update the new message index to ensure `result.new_messages()` returns the correct messages
         ctx.deps.new_message_index -= len(original_history) - len(message_history)
 
-        # …
-        # but don't store it in the message history on state.
+        # Merge possible consecutive trailing `ModelRequest`s into one, with tool call parts before user parts,
+        # but don't store it in the message history on state. This is just for the benefit of model classes that want clear user/assistant boundaries.
         # See `tests/test_tools.py::test_parallel_tool_return_with_deferred` for an example where this is necessary
         message_history = _clean_message_history(message_history)
 
@@ -545,27 +540,58 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
         if self._events_iterator is None:
             # Ensure that the stream is only run once
 
+            output_schema = ctx.deps.output_schema
+
             async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]:  # noqa: C901
+                if not self.model_response.parts:
+                    # we got an empty response.
+                    # this sometimes happens with anthropic (and perhaps other models)
+                    # when the model has already returned text along side tool calls
+                    if text_processor := output_schema.text_processor:
+                        # in this scenario, if text responses are allowed, we return text from the most recent model
+                        # response, if any
+                        for message in reversed(ctx.state.message_history):
+                            if isinstance(message, _messages.ModelResponse):
+                                text = ''
+                                for part in message.parts:
+                                    if isinstance(part, _messages.TextPart):
+                                        text += part.content
+                                    elif isinstance(part, _messages.BuiltinToolCallPart):
+                                        # Text parts before a built-in tool call are essentially thoughts,
+                                        # not part of the final result output, so we reset the accumulated text
+                                        text = ''  # pragma: no cover
+                                if text:
+                                    self._next_node = await self._handle_text_response(ctx, text, text_processor)
+                                    return
+
+                    # Go back to the model request node with an empty request, which means we'll essentially
+                    # resubmit the most recent request that resulted in an empty response,
+                    # as the empty response and request will not create any items in the API payload,
+                    # in the hope the model will return a non-empty response this time.
+                    ctx.state.increment_retries(ctx.deps.max_result_retries)
+                    self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[]))
+                    return
+
                 text = ''
                 tool_calls: list[_messages.ToolCallPart] = []
-                invisible_parts = False
+                files: list[_messages.BinaryContent] = []
 
                 for part in self.model_response.parts:
                     if isinstance(part, _messages.TextPart):
                         text += part.content
                     elif isinstance(part, _messages.ToolCallPart):
                         tool_calls.append(part)
+                    elif isinstance(part, _messages.FilePart):
+                        files.append(part.content)
                     elif isinstance(part, _messages.BuiltinToolCallPart):
                         # Text parts before a built-in tool call are essentially thoughts,
                         # not part of the final result output, so we reset the accumulated text
                         text = ''
-                        invisible_parts = True
                         yield _messages.BuiltinToolCallEvent(part)  # pyright: ignore[reportDeprecated]
                     elif isinstance(part, _messages.BuiltinToolReturnPart):
-                        invisible_parts = True
                         yield _messages.BuiltinToolResultEvent(part)  # pyright: ignore[reportDeprecated]
                     elif isinstance(part, _messages.ThinkingPart):
-                        …
+                        pass
                     else:
                         assert_never(part)
@@ -574,47 +600,35 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
             # This accounts for cases like anthropic returns that might contain a text response
             # and a tool call response, where the text response just indicates the tool call will happen.
             try:
+                alternatives: list[str] = []
                 if tool_calls:
                     async for event in self._handle_tool_calls(ctx, tool_calls):
                         yield event
-                … (three removed lines lost in the source rendering)
-                elif invisible_parts:
-                    # handle responses with only thinking or built-in tool parts.
-                    # this can happen with models that support thinking mode when they don't provide
-                    # actionable output alongside their thinking content. so we tell the model to try again.
-                    m = _messages.RetryPromptPart(
-                        content='Responses without text or tool calls are not permitted.',
-                    )
-                    raise ToolRetryError(m)
+                    return
+                elif output_schema.toolset:
+                    alternatives.append('include your response in a tool call')
                 else:
-                    … (21 removed lines lost in the source rendering)
-                    # resubmit the most recent request that resulted in an empty response,
-                    # as the empty response and request will not create any items in the API payload,
-                    # in the hope the model will return a non-empty response this time.
-                    ctx.state.increment_retries(ctx.deps.max_result_retries)
-                    self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[]))
+                    alternatives.append('call a tool')
+
+                if output_schema.allows_image:
+                    if image := next((file for file in files if isinstance(file, _messages.BinaryImage)), None):
+                        self._next_node = await self._handle_image_response(ctx, image)
+                        return
+                    alternatives.append('return an image')
+
+                if text_processor := output_schema.text_processor:
+                    if text:
+                        self._next_node = await self._handle_text_response(ctx, text, text_processor)
+                        return
+                    alternatives.insert(0, 'return text')
+
+                # handle responses with only parts that don't constitute output.
+                # This can happen with models that support thinking mode when they don't provide
+                # actionable output alongside their thinking content. so we tell the model to try again.
+                m = _messages.RetryPromptPart(
+                    content=f'Please {" or ".join(alternatives)}.',
+                )
+                raise ToolRetryError(m)
             except ToolRetryError as e:
                 ctx.state.increment_retries(ctx.deps.max_result_retries, e)
                 self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[e.tool_retry]))
@@ -657,6 +671,28 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
             _messages.ModelRequest(parts=output_parts, instructions=instructions)
         )
 
+    async def _handle_text_response(
+        self,
+        ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
+        text: str,
+        text_processor: _output.BaseOutputProcessor[NodeRunEndT],
+    ) -> ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]]:
+        run_context = build_run_context(ctx)
+
+        result_data = await text_processor.process(text, run_context)
+
+        for validator in ctx.deps.output_validators:
+            result_data = await validator.validate(result_data, run_context)
+        return self._handle_final_result(ctx, result.FinalResult(result_data), [])
+
+    async def _handle_image_response(
+        self,
+        ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
+        image: _messages.BinaryImage,
+    ) -> ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]]:
+        result_data = cast(NodeRunEndT, image)
+        return self._handle_final_result(ctx, result.FinalResult(result_data), [])
+
     def _handle_final_result(
         self,
         ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
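The new `_handle_image_response` path, together with `allow_image_output` above, lets a run end with an image as its final output. A hedged sketch of what this looks like at the public API level (the model name is illustrative, and wiring image output via `output_type=BinaryImage` is an assumption based on the cast in the handler):

from pydantic_ai import Agent, BinaryImage, ImageGenerationTool

agent = Agent(
    'openai:gpt-4o',                       # illustrative model name
    builtin_tools=[ImageGenerationTool()],
    output_type=BinaryImage,               # assumption: image output requested this way
)

result = agent.run_sync('Draw a red circle on a white background.')
image = result.output  # a BinaryImage, per the handling added above
print(image.media_type)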
@@ -671,26 +707,6 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
 
         return End(final_result)
 
-    async def _handle_text_response(
-        self,
-        ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
-        text: str,
-    ) -> ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]]:
-        output_schema = ctx.deps.output_schema
-        run_context = build_run_context(ctx)
-
-        if isinstance(output_schema, _output.TextOutputSchema):
-            result_data = await output_schema.process(text, run_context)
-        else:
-            m = _messages.RetryPromptPart(
-                content='Plain text responses are not permitted, please include your response in a tool call',
-            )
-            raise ToolRetryError(m)
-
-        for validator in ctx.deps.output_validators:
-            result_data = await validator.validate(result_data, run_context)
-        return self._handle_final_result(ctx, result.FinalResult(result_data), [])
-
     __repr__ = dataclasses_no_defaults_repr
 
 
@@ -823,6 +839,7 @@ async def process_tool_calls( # noqa: C901
         tool_calls=calls_to_run,
         tool_call_results=calls_to_run_results,
         tracer=ctx.deps.tracer,
+        usage=ctx.state.usage,
         usage_limits=ctx.deps.usage_limits,
         output_parts=output_parts,
         output_deferred_calls=deferred_calls,
@@ -869,7 +886,8 @@ async def _call_tools(
     tool_calls: list[_messages.ToolCallPart],
     tool_call_results: dict[str, DeferredToolResult],
     tracer: Tracer,
-    usage_limits: _usage.UsageLimits,
+    usage: _usage.RunUsage,
+    usage_limits: _usage.UsageLimits,
     output_parts: list[_messages.ModelRequestPart],
     output_deferred_calls: dict[Literal['external', 'unapproved'], list[_messages.ToolCallPart]],
 ) -> AsyncIterator[_messages.HandleResponseEvent]:
@@ -877,6 +895,11 @@
     user_parts_by_index: dict[int, _messages.UserPromptPart] = {}
     deferred_calls_by_index: dict[int, Literal['external', 'unapproved']] = {}
 
+    if usage_limits.tool_calls_limit is not None:
+        projected_usage = deepcopy(usage)
+        projected_usage.tool_calls += len(tool_calls)
+        usage_limits.check_before_tool_call(projected_usage)
+
     for call in tool_calls:
         yield _messages.FunctionToolCallEvent(call)
 
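Together with the `usage`/`usage_limits` parameters threaded into `_call_tools` above, this makes `tool_calls_limit` a pre-flight check: usage is projected for the whole batch before any tool runs. A hedged sketch of the observable behavior (model name and stub tool are illustrative):

from pydantic_ai import Agent
from pydantic_ai.exceptions import UsageLimitExceeded
from pydantic_ai.usage import UsageLimits

agent = Agent('openai:gpt-4o')  # illustrative model name

@agent.tool_plain
def get_weather(city: str) -> str:
    return f'{city}: sunny'  # stub tool for the sketch

try:
    # Per the new pre-flight check, a parallel batch of tool calls that would
    # push `tool_calls` past the limit is rejected before any tool executes.
    agent.run_sync(
        'Get the weather in London, Paris, and Tokyo.',
        usage_limits=UsageLimits(tool_calls_limit=2),
    )
except UsageLimitExceeded as e:
    print(e)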
@@ -890,13 +913,19 @@
 
     async def handle_call_or_result(
         coro_or_task: Awaitable[
-            tuple[_messages.ToolReturnPart | _messages.RetryPromptPart, _messages.UserPromptPart | None]
+            tuple[
+                _messages.ToolReturnPart | _messages.RetryPromptPart, str | Sequence[_messages.UserContent] | None
+            ]
         ]
-        | Task[tuple[_messages.ToolReturnPart | _messages.RetryPromptPart, _messages.UserPromptPart | None]],
+        | Task[
+            tuple[
+                _messages.ToolReturnPart | _messages.RetryPromptPart, str | Sequence[_messages.UserContent] | None
+            ]
+        ],
         index: int,
     ) -> _messages.HandleResponseEvent | None:
         try:
-            tool_part, user_part = (
+            tool_part, tool_user_content = (
                 (await coro_or_task) if inspect.isawaitable(coro_or_task) else coro_or_task.result()
             )
         except exceptions.CallDeferred:
@@ -905,15 +934,15 @@
             deferred_calls_by_index[index] = 'unapproved'
         else:
             tool_parts_by_index[index] = tool_part
-            if user_part:
-                user_parts_by_index[index] = user_part
+            if tool_user_content:
+                user_parts_by_index[index] = _messages.UserPromptPart(content=tool_user_content)
 
-            return _messages.FunctionToolResultEvent(tool_part)
+            return _messages.FunctionToolResultEvent(tool_part, content=tool_user_content)
 
     if tool_manager.should_call_sequentially(tool_calls):
         for index, call in enumerate(tool_calls):
             if event := await handle_call_or_result(
-                _call_tool(tool_manager, call, tool_call_results.get(call.tool_call_id)…
+                _call_tool(tool_manager, call, tool_call_results.get(call.tool_call_id)),
                 index,
             ):
                 yield event
@@ -921,7 +950,7 @@
     else:
         tasks = [
             asyncio.create_task(
-                _call_tool(tool_manager, call, tool_call_results.get(call.tool_call_id)…
+                _call_tool(tool_manager, call, tool_call_results.get(call.tool_call_id)),
                 name=call.tool_name,
             )
             for call in tool_calls
@@ -948,15 +977,14 @@ async def _call_tool(
     tool_manager: ToolManager[DepsT],
     tool_call: _messages.ToolCallPart,
     tool_call_result: DeferredToolResult | None,
-    …
-) -> tuple[_messages.ToolReturnPart | _messages.RetryPromptPart, _messages.UserPromptPart | None]:
+) -> tuple[_messages.ToolReturnPart | _messages.RetryPromptPart, str | Sequence[_messages.UserContent] | None]:
     try:
         if tool_call_result is None:
-            tool_result = await tool_manager.handle_call(tool_call…
+            tool_result = await tool_manager.handle_call(tool_call)
         elif isinstance(tool_call_result, ToolApproved):
             if tool_call_result.override_args is not None:
                 tool_call = dataclasses.replace(tool_call, args=tool_call_result.override_args)
-            tool_result = await tool_manager.handle_call(tool_call…
+            tool_result = await tool_manager.handle_call(tool_call)
         elif isinstance(tool_call_result, ToolDenied):
             return _messages.ToolReturnPart(
                 tool_name=tool_call.tool_name,
@@ -1026,14 +1054,7 @@ async def _call_tool(
             metadata=tool_return.metadata,
         )
 
-        user_part: _messages.UserPromptPart | None = None
-        if tool_return.content:
-            user_part = _messages.UserPromptPart(
-                content=tool_return.content,
-                part_kind='user-prompt',
-            )
-
-        return return_part, user_part
+        return return_part, tool_return.content or None
 
 
 @dataclasses.dataclass
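The net effect of the `_call_tool` changes: a tool's extra user-visible content now travels as raw content (`str | Sequence[UserContent]`), gets attached to `FunctionToolResultEvent`, and the `UserPromptPart` is built centrally in `_call_tools`. A hedged sketch from the calling side, using the public `ToolReturn` type (model name and tool body are illustrative):

from pydantic_ai import Agent
from pydantic_ai.messages import ToolReturn

agent = Agent('openai:gpt-4o')  # illustrative model name

@agent.tool_plain
def take_screenshot() -> ToolReturn:
    # `return_value` goes back to the model as the tool result; `content` is
    # the extra user content that, per the diff, is now returned raw from
    # `_call_tool` and wrapped in a UserPromptPart later in `_call_tools`.
    return ToolReturn(
        return_value='screenshot captured',
        content=['Here is the screenshot the tool produced.'],
    )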
pydantic_ai/_cli.py
CHANGED
@@ -18,7 +18,7 @@ from . import __version__
 from ._run_context import AgentDepsT
 from .agent import AbstractAgent, Agent
 from .exceptions import UserError
-from .messages import ModelMessage, …
+from .messages import ModelMessage, ModelResponse
 from .models import KnownModelName, infer_model
 from .output import OutputDataT
@@ -351,14 +351,11 @@ def handle_slash_command(
         console.print('[dim]Exiting…[/dim]')
         return 0, multiline
     elif ident_prompt == '/cp':
-        try:
-            parts = messages[-1].parts
-        except IndexError:
+        if not messages or not isinstance(messages[-1], ModelResponse):
             console.print('[dim]No output available to copy.[/dim]')
         else:
-            text_to_copy = …
-            text_to_copy …
-            if text_to_copy:
+            text_to_copy = messages[-1].text
+            if text_to_copy and (text_to_copy := text_to_copy.strip()):
                 pyperclip.copy(text_to_copy)
                 console.print('[dim]Copied last output to clipboard.[/dim]')
             else: