pydantic-ai-slim 0.3.2.tar.gz → 0.3.4.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/PKG-INFO +4 -4
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/__init__.py +5 -2
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/_agent_graph.py +40 -16
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/_cli.py +7 -3
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/_function_schema.py +1 -4
- pydantic_ai_slim-0.3.4/pydantic_ai/_output.py +934 -0
- pydantic_ai_slim-0.3.4/pydantic_ai/_run_context.py +56 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/_system_prompt.py +2 -1
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/_utils.py +111 -1
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/agent.py +57 -34
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/messages.py +20 -11
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/__init__.py +21 -2
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/anthropic.py +7 -9
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/function.py +21 -3
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/gemini.py +27 -4
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/google.py +29 -4
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/instrumented.py +5 -1
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/mistral.py +5 -1
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/openai.py +70 -9
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/test.py +1 -1
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/wrapper.py +6 -0
- pydantic_ai_slim-0.3.4/pydantic_ai/output.py +288 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/__init__.py +21 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/_json_schema.py +1 -1
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/google.py +6 -2
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/openai.py +5 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/result.py +52 -26
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/tools.py +5 -49
- pydantic_ai_slim-0.3.2/pydantic_ai/_output.py +0 -439
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/.gitignore +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/LICENSE +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/README.md +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/__main__.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/_a2a.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/_mcp.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/_parts_manager.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/_thinking_part.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/direct.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/ext/__init__.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/ext/langchain.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/format_as_xml.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/format_prompt.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/mcp.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/bedrock.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/cohere.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/fallback.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/groq.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/mcp_sampling.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/amazon.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/anthropic.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/cohere.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/deepseek.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/grok.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/meta.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/mistral.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/qwen.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/__init__.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/anthropic.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/azure.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/cohere.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/fireworks.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/google.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/grok.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/groq.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/heroku.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/mistral.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/openai.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/openrouter.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/together.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/usage.py +0 -0
- {pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pyproject.toml +0 -0
{pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.3.2
+Version: 0.3.4
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
 License-Expression: MIT
```
```diff
@@ -30,11 +30,11 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.3.2
+Requires-Dist: pydantic-graph==0.3.4
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
-Requires-Dist: fasta2a==0.3.2; extra == 'a2a'
+Requires-Dist: fasta2a==0.3.4; extra == 'a2a'
 Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.52.0; extra == 'anthropic'
 Provides-Extra: bedrock
```
```diff
@@ -48,7 +48,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.3.2; extra == 'evals'
+Requires-Dist: pydantic-evals==0.3.4; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.15.0; extra == 'google'
 Provides-Extra: groq
```
{pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/__init__.py

```diff
@@ -12,7 +12,7 @@ from .exceptions import (
 )
 from .format_prompt import format_as_xml
 from .messages import AudioUrl, BinaryContent, DocumentUrl, ImageUrl, VideoUrl
-from .
+from .output import NativeOutput, PromptedOutput, TextOutput, ToolOutput
 from .tools import RunContext, Tool
 
 __all__ = (
```
```diff
@@ -41,8 +41,11 @@ __all__ = (
     # tools
     'Tool',
     'RunContext',
-    #
+    # output
     'ToolOutput',
+    'NativeOutput',
+    'PromptedOutput',
+    'TextOutput',
     # format_prompt
     'format_as_xml',
 )
```
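The four markers exported above are the user-facing way to pick an output mode. A minimal sketch of how they might be applied (the model string and the `CityLocation` type are illustrative, and the marker semantics are inferred from the names in this diff, not stated in it):

```python
# Illustrative sketch: NativeOutput is assumed to request structured output
# via the model's native JSON-schema mode rather than a synthetic tool call.
from pydantic import BaseModel

from pydantic_ai import Agent, NativeOutput

class CityLocation(BaseModel):
    city: str
    country: str

agent = Agent('openai:gpt-4o', output_type=NativeOutput(CityLocation))
result = agent.run_sync('Where were the 2012 Summer Olympics held?')
print(result.output)  # a validated CityLocation instance
```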
{pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/_agent_graph.py

```diff
@@ -18,12 +18,13 @@ from pydantic_graph import BaseNode, Graph, GraphRunContext
 from pydantic_graph.nodes import End, NodeRunEndT
 
 from . import _output, _system_prompt, exceptions, messages as _messages, models, result, usage as _usage
-from .
+from .output import OutputDataT, OutputSpec
 from .settings import ModelSettings, merge_model_settings
 from .tools import RunContext, Tool, ToolDefinition, ToolsPrepareFunc
 
 if TYPE_CHECKING:
     from .mcp import MCPServer
+    from .models.instrumented import InstrumentationSettings
 
 __all__ = (
     'GraphAgentState',
```
```diff
@@ -102,7 +103,7 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]):
     end_strategy: EndStrategy
     get_instructions: Callable[[RunContext[DepsT]], Awaitable[str | None]]
 
-    output_schema: _output.OutputSchema[OutputDataT]
+    output_schema: _output.OutputSchema[OutputDataT]
     output_validators: list[_output.OutputValidator[DepsT, OutputDataT]]
 
     history_processors: Sequence[HistoryProcessor[DepsT]]
```
```diff
@@ -112,6 +113,7 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]):
     default_retries: int
 
     tracer: Tracer
+    instrumentation_settings: InstrumentationSettings | None = None
 
     prepare_tools: ToolsPrepareFunc[DepsT] | None = None
 
```
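The new field is annotated with a type imported only under `TYPE_CHECKING` (previous hunk); that works when annotations are evaluated lazily, which this module presumably enables. A toy sketch of the pattern, with `Decimal` standing in for `InstrumentationSettings`:

```python
# Toy sketch: a TYPE_CHECKING-only import can still annotate a dataclass
# field when `from __future__ import annotations` keeps annotations lazy.
from __future__ import annotations

import dataclasses
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from decimal import Decimal  # stand-in for InstrumentationSettings

@dataclasses.dataclass
class Deps:
    value: Decimal | None = None  # annotation is never evaluated at runtime

print(Deps())  # Deps(value=None) -- no eager import of Decimal needed
```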
```diff
@@ -141,6 +143,8 @@ def is_agent_node(
 
 @dataclasses.dataclass
 class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
+    """The node that handles the user prompt and instructions."""
+
     user_prompt: str | Sequence[_messages.UserContent] | None
 
     instructions: str | None
```
```diff
@@ -284,16 +288,29 @@ async def _prepare_request_parameters(
         function_tool_defs = await ctx.deps.prepare_tools(run_context, function_tool_defs) or []
 
     output_schema = ctx.deps.output_schema
+
+    output_tools = []
+    output_object = None
+    if isinstance(output_schema, _output.ToolOutputSchema):
+        output_tools = output_schema.tool_defs()
+    elif isinstance(output_schema, _output.NativeOutputSchema):
+        output_object = output_schema.object_def
+
+    # ToolOrTextOutputSchema, NativeOutputSchema, and PromptedOutputSchema all inherit from TextOutputSchema
+    allow_text_output = isinstance(output_schema, _output.TextOutputSchema)
+
     return models.ModelRequestParameters(
         function_tools=function_tool_defs,
-
-        output_tools=
+        output_mode=output_schema.mode,
+        output_tools=output_tools,
+        output_object=output_object,
+        allow_text_output=allow_text_output,
     )
 
 
 @dataclasses.dataclass
 class ModelRequestNode(AgentNode[DepsT, NodeRunEndT]):
-    """
+    """The node that makes a request to the model using the last message in state.message_history."""
 
     request: _messages.ModelRequest
 
```
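The `isinstance` dispatch above leans on the schema class hierarchy named in the hunk's comment. A toy reconstruction of those relationships (the inheritance edges for the text schemas come from the comment; `ToolOrTextOutputSchema` also deriving from `ToolOutputSchema` is an assumption, needed so the first branch still collects its tool defs):

```python
# Toy model of the output-schema hierarchy; bodies elided.
class OutputSchema: ...
class ToolOutputSchema(OutputSchema): ...
class TextOutputSchema(OutputSchema): ...
class NativeOutputSchema(TextOutputSchema): ...
class PromptedOutputSchema(TextOutputSchema): ...
class ToolOrTextOutputSchema(TextOutputSchema, ToolOutputSchema): ...  # assumption

assert isinstance(NativeOutputSchema(), TextOutputSchema)      # text allowed
assert isinstance(ToolOrTextOutputSchema(), ToolOutputSchema)  # tool defs collected
assert not isinstance(ToolOutputSchema(), TextOutputSchema)    # tool-only: no plain text
```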
```diff
@@ -412,7 +429,7 @@ class ModelRequestNode(AgentNode[DepsT, NodeRunEndT]):
 
 @dataclasses.dataclass
 class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
-    """
+    """The node that processes a model response, and decides whether to end the run or make a new request."""
 
     model_response: _messages.ModelResponse
 
```
```diff
@@ -482,7 +499,7 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
             # when the model has already returned text along side tool calls
             # in this scenario, if text responses are allowed, we return text from the most recent model
             # response, if any
-            if
+            if isinstance(ctx.deps.output_schema, _output.TextOutputSchema):
                 for message in reversed(ctx.state.message_history):
                     if isinstance(message, _messages.ModelResponse):
                         last_texts = [p.content for p in message.parts if isinstance(p, _messages.TextPart)]
```
```diff
@@ -505,10 +522,11 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
         output_schema = ctx.deps.output_schema
         run_context = build_run_context(ctx)
 
-        # first, look for the output tool call
         final_result: result.FinalResult[NodeRunEndT] | None = None
         parts: list[_messages.ModelRequestPart] = []
-
+
+        # first, look for the output tool call
+        if isinstance(output_schema, _output.ToolOutputSchema):
             for call, output_tool in output_schema.find_tool(tool_calls):
                 try:
                     result_data = await output_tool.process(call, run_context)
```
```diff
@@ -566,9 +584,9 @@
 
         text = '\n\n'.join(texts)
         try:
-            if _output.
-
-                result_data =
+            if isinstance(output_schema, _output.TextOutputSchema):
+                run_context = build_run_context(ctx)
+                result_data = await output_schema.process(text, run_context)
             else:
                 m = _messages.RetryPromptPart(
                     content='Plain text responses are not permitted, please include your response in a tool call',
```
```diff
@@ -667,7 +685,7 @@ async def process_function_tools(  # noqa C901
             yield event
             call_index_to_event_id[len(calls_to_run)] = event.call_id
             calls_to_run.append((mcp_tool, call))
-        elif
+        elif call.tool_name in output_schema.tools:
             # if tool_name is in output_schema, it means we found a output tool but an error occurred in
             # validation, we don't add another part here
             if output_tool_name is not None:
```
```diff
@@ -696,6 +714,10 @@ async def process_function_tools(  # noqa C901
 
     user_parts: list[_messages.UserPromptPart] = []
 
+    include_content = (
+        ctx.deps.instrumentation_settings is not None and ctx.deps.instrumentation_settings.include_content
+    )
+
     # Run all tool tasks in parallel
     results_by_index: dict[int, _messages.ModelRequestPart] = {}
     with ctx.deps.tracer.start_as_current_span(
```
```diff
@@ -706,7 +728,7 @@ async def process_function_tools(  # noqa C901
         },
     ):
         tasks = [
-            asyncio.create_task(tool.run(call, run_context, ctx.deps.tracer), name=call.tool_name)
+            asyncio.create_task(tool.run(call, run_context, ctx.deps.tracer, include_content), name=call.tool_name)
             for tool, call in calls_to_run
         ]
 
```
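`include_content` is computed once (two hunks up) and threaded into every `tool.run` call, so each tool span can decide whether to record payloads. A sketch of this kind of opt-in gating (the attribute names are illustrative, not taken from the diff):

```python
from __future__ import annotations

from dataclasses import dataclass

@dataclass
class InstrumentationSettings:
    include_content: bool = True

def tool_span_attributes(
    settings: InstrumentationSettings | None, tool_name: str, args_json: str
) -> dict[str, str]:
    # Record potentially sensitive payloads only when instrumentation is
    # configured *and* content capture is enabled.
    attrs = {'gen_ai.tool.name': tool_name}
    if settings is not None and settings.include_content:
        attrs['tool_arguments'] = args_json
    return attrs

print(tool_span_attributes(None, 'get_weather', '{"city": "London"}'))
```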
```diff
@@ -807,7 +829,9 @@ def _unknown_tool(
 ) -> _messages.RetryPromptPart:
     ctx.state.increment_retries(ctx.deps.max_result_retries)
     tool_names = list(ctx.deps.function_tools.keys())
-
+
+    output_schema = ctx.deps.output_schema
+    if isinstance(output_schema, _output.ToolOutputSchema):
         tool_names.extend(output_schema.tool_names())
 
     if tool_names:
```
```diff
@@ -884,7 +908,7 @@ def get_captured_run_messages() -> _RunMessages:
 def build_agent_graph(
     name: str | None,
     deps_type: type[DepsT],
-    output_type:
+    output_type: OutputSpec[OutputT],
 ) -> Graph[GraphAgentState, GraphAgentDeps[DepsT, result.FinalResult[OutputT]], result.FinalResult[OutputT]]:
     """Build the execution [Graph][pydantic_graph.Graph] for a given agent."""
     nodes = (
```
{pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/_cli.py

```diff
@@ -14,14 +14,13 @@ from typing import Any, cast
 
 from typing_inspection.introspection import get_literal_values
 
-from pydantic_ai.result import OutputDataT
-from pydantic_ai.tools import AgentDepsT
-
 from . import __version__
+from ._run_context import AgentDepsT
 from .agent import Agent
 from .exceptions import UserError
 from .messages import ModelMessage
 from .models import KnownModelName, infer_model
+from .output import OutputDataT
 
 try:
     import argcomplete
```
```diff
@@ -254,6 +253,11 @@ async def run_chat(
             messages = await ask_agent(agent, text, stream, console, code_theme, deps, messages)
         except CancelledError:  # pragma: no cover
             console.print('[dim]Interrupted[/dim]')
+        except Exception as e:  # pragma: no cover
+            cause = getattr(e, '__cause__', None)
+            console.print(f'\n[red]{type(e).__name__}:[/red] {e}')
+            if cause:
+                console.print(f'[dim]Caused by: {cause}[/dim]')
 
 
 async def ask_agent(
```
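The new handler keeps the chat loop alive and surfaces the exception chain: `__cause__` is set by `raise ... from err`, which is where the underlying model or validation error usually lives. A self-contained illustration of why inspecting it helps:

```python
# Chained exceptions: `raise ... from err` sets __cause__, which the new
# CLI handler prints so the root failure stays visible.
try:
    try:
        raise ValueError('model returned invalid JSON')
    except ValueError as inner:
        raise RuntimeError('agent run failed') from inner
except Exception as e:
    print(f'{type(e).__name__}: {e}')       # RuntimeError: agent run failed
    if e.__cause__:
        print(f'Caused by: {e.__cause__}')  # Caused by: model returned invalid JSON
```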
{pydantic_ai_slim-0.3.2 → pydantic_ai_slim-0.3.4}/pydantic_ai/_function_schema.py

```diff
@@ -19,9 +19,8 @@ from pydantic.plugin._schema_validator import create_schema_validator
 from pydantic_core import SchemaValidator, core_schema
 from typing_extensions import Concatenate, ParamSpec, TypeIs, TypeVar, get_origin
 
-from pydantic_ai.tools import RunContext
-
 from ._griffe import doc_descriptions
+from ._run_context import RunContext
 from ._utils import check_object_json_schema, is_async_callable, is_model_like, run_in_executor
 
 if TYPE_CHECKING:
```
```diff
@@ -281,6 +280,4 @@ def _build_schema(
 
 def _is_call_ctx(annotation: Any) -> bool:
     """Return whether the annotation is the `RunContext` class, parameterized or not."""
-    from .tools import RunContext
-
     return annotation is RunContext or get_origin(annotation) is RunContext
```
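With `RunContext` now imported from `._run_context` at module level, the local import inside `_is_call_ctx` is gone. The check itself handles both bare and parameterized annotations; a toy demonstration using a stand-in generic class:

```python
from typing import Generic, TypeVar, get_origin

T = TypeVar('T')

class RunContext(Generic[T]):  # stand-in for pydantic_ai's RunContext
    ...

def is_call_ctx(annotation: object) -> bool:
    # get_origin() unwraps RunContext[int] back to RunContext; the first
    # test catches the bare, unparameterized class.
    return annotation is RunContext or get_origin(annotation) is RunContext

assert is_call_ctx(RunContext)
assert is_call_ctx(RunContext[int])
assert not is_call_ctx(int)
```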