pydantic-ai-slim 1.0.9__tar.gz → 1.0.11__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pydantic-ai-slim might be problematic.
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/PKG-INFO +6 -6
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/_agent_graph.py +59 -53
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/_function_schema.py +18 -10
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/_output.py +1 -8
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/agent/__init__.py +2 -1
- pydantic_ai_slim-1.0.11/pydantic_ai/format_prompt.py +205 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/mcp.py +1 -1
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/messages.py +3 -5
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/models/__init__.py +2 -81
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/models/anthropic.py +15 -9
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/models/google.py +27 -17
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/models/instrumented.py +27 -11
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/models/openai.py +39 -7
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/output.py +12 -1
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/profiles/harmony.py +3 -1
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/ollama.py +2 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pyproject.toml +3 -3
- pydantic_ai_slim-1.0.9/pydantic_ai/format_prompt.py +0 -113
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/.gitignore +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/LICENSE +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/README.md +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/__main__.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/_a2a.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/_cli.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/_mcp.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/_otel_messages.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/_parts_manager.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/_run_context.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/_thinking_part.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/_tool_manager.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/_utils.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/ag_ui.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/agent/abstract.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/agent/wrapper.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/builtin_tools.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/direct.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/durable_exec/__init__.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/durable_exec/dbos/__init__.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/durable_exec/dbos/_agent.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/durable_exec/dbos/_mcp_server.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/durable_exec/dbos/_model.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/durable_exec/dbos/_utils.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/durable_exec/temporal/__init__.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/durable_exec/temporal/_agent.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/durable_exec/temporal/_function_toolset.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/durable_exec/temporal/_logfire.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/durable_exec/temporal/_mcp_server.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/durable_exec/temporal/_model.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/durable_exec/temporal/_run_context.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/durable_exec/temporal/_toolset.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/ext/__init__.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/ext/aci.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/ext/langchain.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/models/bedrock.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/models/cohere.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/models/fallback.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/models/function.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/models/gemini.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/models/groq.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/models/huggingface.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/models/mcp_sampling.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/models/mistral.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/models/test.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/profiles/__init__.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/profiles/_json_schema.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/profiles/amazon.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/profiles/anthropic.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/profiles/cohere.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/profiles/deepseek.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/profiles/google.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/profiles/grok.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/profiles/groq.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/profiles/meta.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/profiles/mistral.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/profiles/moonshotai.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/profiles/openai.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/profiles/qwen.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/__init__.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/anthropic.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/azure.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/cerebras.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/cohere.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/fireworks.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/gateway.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/github.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/google.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/grok.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/groq.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/heroku.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/huggingface.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/litellm.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/mistral.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/moonshotai.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/openai.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/openrouter.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/together.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/providers/vercel.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/result.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/retries.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/run.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/tools.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/toolsets/__init__.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/toolsets/_dynamic.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/toolsets/abstract.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/toolsets/approval_required.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/toolsets/combined.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/toolsets/external.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/toolsets/filtered.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/toolsets/function.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/toolsets/prefixed.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/toolsets/prepared.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/toolsets/renamed.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/toolsets/wrapper.py +0 -0
- {pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/usage.py +0 -0
{pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 1.0.9
+Version: 1.0.11
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -29,11 +29,11 @@ Classifier: Topic :: Internet
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.10
 Requires-Dist: exceptiongroup; python_version < '3.11'
-Requires-Dist: genai-prices>=0.0.…
+Requires-Dist: genai-prices>=0.0.28
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==1.0.9
+Requires-Dist: pydantic-graph==1.0.11
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -42,7 +42,7 @@ Provides-Extra: ag-ui
 Requires-Dist: ag-ui-protocol>=0.1.8; extra == 'ag-ui'
 Requires-Dist: starlette>=0.45.3; extra == 'ag-ui'
 Provides-Extra: anthropic
-Requires-Dist: anthropic>=0.…
+Requires-Dist: anthropic>=0.69.0; extra == 'anthropic'
 Provides-Extra: bedrock
 Requires-Dist: boto3>=1.39.0; extra == 'bedrock'
 Provides-Extra: cli
@@ -57,7 +57,7 @@ Requires-Dist: dbos>=1.14.0; extra == 'dbos'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==1.0.9; extra == 'evals'
+Requires-Dist: pydantic-evals==1.0.11; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq
@@ -77,7 +77,7 @@ Requires-Dist: tenacity>=8.2.3; extra == 'retries'
 Provides-Extra: tavily
 Requires-Dist: tavily-python>=0.5.0; extra == 'tavily'
 Provides-Extra: temporal
-Requires-Dist: temporalio==1.…
+Requires-Dist: temporalio==1.18.0; extra == 'temporal'
 Provides-Extra: vertexai
 Requires-Dist: google-auth>=2.36.0; extra == 'vertexai'
 Requires-Dist: requests>=2.32.2; extra == 'vertexai'
{pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/_agent_graph.py

@@ -547,7 +547,7 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
             async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]:  # noqa: C901
                 text = ''
                 tool_calls: list[_messages.ToolCallPart] = []
-…
+                invisible_parts: bool = False

                 for part in self.model_response.parts:
                     if isinstance(part, _messages.TextPart):
@@ -558,11 +558,13 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
                         # Text parts before a built-in tool call are essentially thoughts,
                         # not part of the final result output, so we reset the accumulated text
                         text = ''
+                        invisible_parts = True
                         yield _messages.BuiltinToolCallEvent(part)  # pyright: ignore[reportDeprecated]
                     elif isinstance(part, _messages.BuiltinToolReturnPart):
+                        invisible_parts = True
                         yield _messages.BuiltinToolResultEvent(part)  # pyright: ignore[reportDeprecated]
                     elif isinstance(part, _messages.ThinkingPart):
-…
+                        invisible_parts = True
                     else:
                         assert_never(part)

@@ -570,43 +572,51 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
                 # In the future, we'd consider making this configurable at the agent or run level.
                 # This accounts for cases like anthropic returns that might contain a text response
                 # and a tool call response, where the text response just indicates the tool call will happen.
-… (13 removed lines not shown)
+                try:
+                    if tool_calls:
+                        async for event in self._handle_tool_calls(ctx, tool_calls):
+                            yield event
+                    elif text:
+                        # No events are emitted during the handling of text responses, so we don't need to yield anything
+                        self._next_node = await self._handle_text_response(ctx, text)
+                    elif invisible_parts:
+                        # handle responses with only thinking or built-in tool parts.
+                        # this can happen with models that support thinking mode when they don't provide
+                        # actionable output alongside their thinking content. so we tell the model to try again.
+                        m = _messages.RetryPromptPart(
+                            content='Responses without text or tool calls are not permitted.',
                         )
-… (23 removed lines not shown)
+                        raise ToolRetryError(m)
+                    else:
+                        # we got an empty response with no tool calls, text, thinking, or built-in tool calls.
+                        # this sometimes happens with anthropic (and perhaps other models)
+                        # when the model has already returned text along side tool calls
+                        # in this scenario, if text responses are allowed, we return text from the most recent model
+                        # response, if any
+                        if isinstance(ctx.deps.output_schema, _output.TextOutputSchema):
+                            for message in reversed(ctx.state.message_history):
+                                if isinstance(message, _messages.ModelResponse):
+                                    text = ''
+                                    for part in message.parts:
+                                        if isinstance(part, _messages.TextPart):
+                                            text += part.content
+                                        elif isinstance(part, _messages.BuiltinToolCallPart):
+                                            # Text parts before a built-in tool call are essentially thoughts,
+                                            # not part of the final result output, so we reset the accumulated text
+                                            text = ''  # pragma: no cover
+                                    if text:
+                                        self._next_node = await self._handle_text_response(ctx, text)
+                                        return
+
+                        # Go back to the model request node with an empty request, which means we'll essentially
+                        # resubmit the most recent request that resulted in an empty response,
+                        # as the empty response and request will not create any items in the API payload,
+                        # in the hope the model will return a non-empty response this time.
+                        ctx.state.increment_retries(ctx.deps.max_result_retries)
+                        self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[]))
+                except ToolRetryError as e:
+                    ctx.state.increment_retries(ctx.deps.max_result_retries, e)
+                    self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[e.tool_retry]))

             self._events_iterator = _run_stream()

@@ -666,23 +676,19 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
         text: str,
     ) -> ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]]:
         output_schema = ctx.deps.output_schema
-…
-            run_context = build_run_context(ctx)
-            if isinstance(output_schema, _output.TextOutputSchema):
-                result_data = await output_schema.process(text, run_context)
-            else:
-                m = _messages.RetryPromptPart(
-                    content='Plain text responses are not permitted, please include your response in a tool call',
-                )
-                raise ToolRetryError(m)
+        run_context = build_run_context(ctx)

-…
-…
-        except ToolRetryError as e:
-            ctx.state.increment_retries(ctx.deps.max_result_retries, e)
-            return ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[e.tool_retry]))
+        if isinstance(output_schema, _output.TextOutputSchema):
+            result_data = await output_schema.process(text, run_context)
         else:
-…
+            m = _messages.RetryPromptPart(
+                content='Plain text responses are not permitted, please include your response in a tool call',
+            )
+            raise ToolRetryError(m)
+
+        for validator in ctx.deps.output_validators:
+            result_data = await validator.validate(result_data, run_context)
+        return self._handle_final_result(ctx, result.FinalResult(result_data), [])

     __repr__ = dataclasses_no_defaults_repr

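With the new `invisible_parts` handling above, a response containing only thinking or built-in tool parts now triggers a retry prompt ('Responses without text or tool calls are not permitted.') instead of falling through. A minimal sketch that exercises this path with `FunctionModel`; the two-step model function and the assumption of default retry settings are illustrative, not part of the diff:

```python
from pydantic_ai import Agent
from pydantic_ai.messages import ModelMessage, ModelResponse, TextPart, ThinkingPart
from pydantic_ai.models.function import AgentInfo, FunctionModel


def thinking_then_text(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
    """First call returns only a ThinkingPart; the agent should retry, then we answer with text."""
    if len(messages) == 1:
        return ModelResponse(parts=[ThinkingPart(content='pondering...')])
    return ModelResponse(parts=[TextPart(content='Done')])


agent = Agent(FunctionModel(thinking_then_text))
print(agent.run_sync('hello').output)
#> Done
```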
{pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/_function_schema.py

@@ -231,31 +231,39 @@ R = TypeVar('R')

 WithCtx = Callable[Concatenate[RunContext[Any], P], R]
 WithoutCtx = Callable[P, R]
-…
+TargetCallable = WithCtx[P, R] | WithoutCtx[P, R]


-def _takes_ctx(…
-    """Check if a…
+def _takes_ctx(callable_obj: TargetCallable[P, R]) -> TypeIs[WithCtx[P, R]]:
+    """Check if a callable takes a `RunContext` first argument.

     Args:
-…
+        callable_obj: The callable to check.

     Returns:
-        `True` if the…
+        `True` if the callable takes a `RunContext` as first argument, `False` otherwise.
     """
     try:
-        sig = signature(…
-    except ValueError:
-        return False
+        sig = signature(callable_obj)
+    except ValueError:
+        return False
     try:
         first_param_name = next(iter(sig.parameters.keys()))
     except StopIteration:
         return False
     else:
-…
+        # See https://github.com/pydantic/pydantic/pull/11451 for a similar implementation in Pydantic
+        if not isinstance(callable_obj, _decorators._function_like):  # pyright: ignore[reportPrivateUsage]
+            call_func = getattr(type(callable_obj), '__call__', None)
+            if call_func is not None:
+                callable_obj = call_func
+            else:
+                return False  # pragma: no cover
+
+        type_hints = _typing_extra.get_function_type_hints(_decorators.unwrap_wrapped_function(callable_obj))
         annotation = type_hints.get(first_param_name)
         if annotation is None:
-            return False
+            return False
         return True is not sig.empty and _is_call_ctx(annotation)

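The rewritten `_takes_ctx` above accepts arbitrary callables, falling back to `type(obj).__call__` for class instances. A small illustration of the new behaviour using the private helper from this hunk (private API, shown only to demonstrate the change; the `Doubler` class is illustrative):

```python
from pydantic_ai import RunContext
from pydantic_ai._function_schema import _takes_ctx  # private helper changed in this hunk


class Doubler:
    """A tool-like callable object; its __call__ takes a RunContext first."""

    def __call__(self, ctx: RunContext[None], x: int) -> int:
        return x * 2


# Previously only plain functions were inspected; now the instance's __call__ is checked too.
print(_takes_ctx(Doubler()))
#> True
```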
{pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/_output.py

@@ -19,6 +19,7 @@ from .output import (
     NativeOutput,
     OutputDataT,
     OutputMode,
+    OutputObjectDefinition,
     OutputSpec,
     OutputTypeOrFunction,
     PromptedOutput,
@@ -581,14 +582,6 @@ class ToolOrTextOutputSchema(ToolOutputSchema[OutputDataT], PlainTextOutputSchem
         return 'tool_or_text'


-@dataclass
-class OutputObjectDefinition:
-    json_schema: ObjectJsonSchema
-    name: str | None = None
-    description: str | None = None
-    strict: bool | None = None
-
-
 @dataclass(init=False)
 class BaseOutputProcessor(ABC, Generic[OutputDataT]):
     @abstractmethod
{pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/agent/__init__.py

@@ -259,7 +259,8 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
             name: The name of the agent, used for logging. If `None`, we try to infer the agent name from the call frame
                 when the agent is first run.
             model_settings: Optional model request settings to use for this agent's runs, by default.
-            retries: The default number of retries to allow before raising an error.
+            retries: The default number of retries to allow for tool calls and output validation, before raising an error.
+                For model request retries, see the [HTTP Request Retries](../retries.md) documentation.
             output_retries: The maximum number of retries to allow for output validation, defaults to `retries`.
             tools: Tools to register with the agent, you can also register tools via the decorators
                 [`@agent.tool`][pydantic_ai.Agent.tool] and [`@agent.tool_plain`][pydantic_ai.Agent.tool_plain].
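The reworded docstring distinguishes tool-call/output-validation retries from HTTP request retries. A hedged sketch of the corresponding constructor arguments (the model string is just an example):

```python
from pydantic_ai import Agent

# `retries` bounds tool-call and output-validation retries within a run;
# HTTP-level request retries are configured separately (see the retries docs referenced above).
agent = Agent('openai:gpt-4o', retries=3, output_retries=5)
```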
pydantic_ai_slim-1.0.11/pydantic_ai/format_prompt.py (new file)

@@ -0,0 +1,205 @@
+from __future__ import annotations as _annotations
+
+from collections.abc import Iterable, Iterator, Mapping
+from dataclasses import asdict, dataclass, field, fields, is_dataclass
+from datetime import date
+from typing import Any, Literal
+from xml.etree import ElementTree
+
+from pydantic import BaseModel
+
+__all__ = ('format_as_xml',)
+
+from pydantic.fields import ComputedFieldInfo, FieldInfo
+
+
+def format_as_xml(
+    obj: Any,
+    root_tag: str | None = None,
+    item_tag: str = 'item',
+    none_str: str = 'null',
+    indent: str | None = '  ',
+    include_field_info: Literal['once'] | bool = False,
+) -> str:
+    """Format a Python object as XML.
+
+    This is useful since LLMs often find it easier to read semi-structured data (e.g. examples) as XML,
+    rather than JSON etc.
+
+    Supports: `str`, `bytes`, `bytearray`, `bool`, `int`, `float`, `date`, `datetime`, `Mapping`,
+    `Iterable`, `dataclass`, and `BaseModel`.
+
+    Args:
+        obj: Python Object to serialize to XML.
+        root_tag: Outer tag to wrap the XML in, use `None` to omit the outer tag.
+        item_tag: Tag to use for each item in an iterable (e.g. list), this is overridden by the class name
+            for dataclasses and Pydantic models.
+        none_str: String to use for `None` values.
+        indent: Indentation string to use for pretty printing.
+        include_field_info: Whether to include attributes like Pydantic `Field` attributes and dataclasses `field()`
+            `metadata` as XML attributes. In both cases the allowed `Field` attributes and `field()` metadata keys are
+            `title` and `description`. If a field is repeated in the data (e.g. in a list) by setting `once`
+            the attributes are included only in the first occurrence of an XML element relative to the same field.
+
+    Returns:
+        XML representation of the object.
+
+    Example:
+    ```python {title="format_as_xml_example.py" lint="skip"}
+    from pydantic_ai import format_as_xml
+
+    print(format_as_xml({'name': 'John', 'height': 6, 'weight': 200}, root_tag='user'))
+    '''
+    <user>
+      <name>John</name>
+      <height>6</height>
+      <weight>200</weight>
+    </user>
+    '''
+    ```
+    """
+    el = _ToXml(
+        data=obj,
+        item_tag=item_tag,
+        none_str=none_str,
+        include_field_info=include_field_info,
+    ).to_xml(root_tag)
+    if root_tag is None and el.text is None:
+        join = '' if indent is None else '\n'
+        return join.join(_rootless_xml_elements(el, indent))
+    else:
+        if indent is not None:
+            ElementTree.indent(el, space=indent)
+        return ElementTree.tostring(el, encoding='unicode')
+
+
+@dataclass
+class _ToXml:
+    data: Any
+    item_tag: str
+    none_str: str
+    include_field_info: Literal['once'] | bool
+    # a map of Pydantic and dataclasses Field paths to their metadata:
+    # a field unique string representation and its class
+    _fields_info: dict[str, tuple[str, FieldInfo | ComputedFieldInfo]] = field(default_factory=dict)
+    # keep track of fields we have extracted attributes from
+    _included_fields: set[str] = field(default_factory=set)
+    # keep track of class names for dataclasses and Pydantic models, that occur in lists
+    _element_names: dict[str, str] = field(default_factory=dict)
+    # flag for parsing dataclasses and Pydantic models once
+    _is_info_extracted: bool = False
+    _FIELD_ATTRIBUTES = ('title', 'description')
+
+    def to_xml(self, tag: str | None = None) -> ElementTree.Element:
+        return self._to_xml(value=self.data, path='', tag=tag)
+
+    def _to_xml(self, value: Any, path: str, tag: str | None = None) -> ElementTree.Element:
+        element = self._create_element(self.item_tag if tag is None else tag, path)
+        if value is None:
+            element.text = self.none_str
+        elif isinstance(value, str):
+            element.text = value
+        elif isinstance(value, bytes | bytearray):
+            element.text = value.decode(errors='ignore')
+        elif isinstance(value, bool | int | float):
+            element.text = str(value)
+        elif isinstance(value, date):
+            element.text = value.isoformat()
+        elif isinstance(value, Mapping):
+            if tag is None and path in self._element_names:
+                element.tag = self._element_names[path]
+            self._mapping_to_xml(element, value, path)  # pyright: ignore[reportUnknownArgumentType]
+        elif is_dataclass(value) and not isinstance(value, type):
+            self._init_structure_info()
+            if tag is None:
+                element.tag = value.__class__.__name__
+            self._mapping_to_xml(element, asdict(value), path)
+        elif isinstance(value, BaseModel):
+            self._init_structure_info()
+            if tag is None:
+                element.tag = value.__class__.__name__
+            # by dumping the model we loose all metadata in nested data structures,
+            # but we have collected it when called _init_structure_info
+            self._mapping_to_xml(element, value.model_dump(), path)
+        elif isinstance(value, Iterable):
+            for n, item in enumerate(value):  # pyright: ignore[reportUnknownVariableType,reportUnknownArgumentType]
+                element.append(self._to_xml(value=item, path=f'{path}.[{n}]' if path else f'[{n}]'))
+        else:
+            raise TypeError(f'Unsupported type for XML formatting: {type(value)}')
+        return element
+
+    def _create_element(self, tag: str, path: str) -> ElementTree.Element:
+        element = ElementTree.Element(tag)
+        if path in self._fields_info:
+            field_repr, field_info = self._fields_info[path]
+            if self.include_field_info and self.include_field_info != 'once' or field_repr not in self._included_fields:
+                field_attributes = self._extract_attributes(field_info)
+                for k, v in field_attributes.items():
+                    element.set(k, v)
+                self._included_fields.add(field_repr)
+        return element
+
+    def _init_structure_info(self):
+        """Create maps with all data information (fields info and class names), if not already created."""
+        if not self._is_info_extracted:
+            self._parse_data_structures(self.data)
+            self._is_info_extracted = True
+
+    def _mapping_to_xml(
+        self,
+        element: ElementTree.Element,
+        mapping: Mapping[Any, Any],
+        path: str = '',
+    ) -> None:
+        for key, value in mapping.items():
+            if isinstance(key, int):
+                key = str(key)
+            elif not isinstance(key, str):
+                raise TypeError(f'Unsupported key type for XML formatting: {type(key)}, only str and int are allowed')
+            element.append(self._to_xml(value=value, path=f'{path}.{key}' if path else key, tag=key))
+
+    def _parse_data_structures(
+        self,
+        value: Any,
+        path: str = '',
+    ):
+        """Parse data structures as dataclasses or Pydantic models to extract element names and attributes."""
+        if value is None or isinstance(value, (str | int | float | date | bytearray | bytes | bool)):
+            return
+        elif isinstance(value, Mapping):
+            for k, v in value.items():  # pyright: ignore[reportUnknownVariableType]
+                self._parse_data_structures(v, f'{path}.{k}' if path else f'{k}')
+        elif is_dataclass(value) and not isinstance(value, type):
+            self._element_names[path] = value.__class__.__name__
+            for field in fields(value):
+                new_path = f'{path}.{field.name}' if path else field.name
+                if self.include_field_info and field.metadata:
+                    attributes = {k: v for k, v in field.metadata.items() if k in self._FIELD_ATTRIBUTES}
+                    if attributes:
+                        field_repr = f'{value.__class__.__name__}.{field.name}'
+                        self._fields_info[new_path] = (field_repr, FieldInfo(**attributes))
+                self._parse_data_structures(getattr(value, field.name), new_path)
+        elif isinstance(value, BaseModel):
+            self._element_names[path] = value.__class__.__name__
+            for model_fields in (value.__class__.model_fields, value.__class__.model_computed_fields):
+                for field, info in model_fields.items():
+                    new_path = f'{path}.{field}' if path else field
+                    if self.include_field_info and (isinstance(info, ComputedFieldInfo) or not info.exclude):
+                        field_repr = f'{value.__class__.__name__}.{field}'
+                        self._fields_info[new_path] = (field_repr, info)
+                    self._parse_data_structures(getattr(value, field), new_path)
+        elif isinstance(value, Iterable):
+            for n, item in enumerate(value):  # pyright: ignore[reportUnknownVariableType,reportUnknownArgumentType]
+                new_path = f'{path}.[{n}]' if path else f'[{n}]'
+                self._parse_data_structures(item, new_path)
+
+    @classmethod
+    def _extract_attributes(cls, info: FieldInfo | ComputedFieldInfo) -> dict[str, str]:
+        return {attr: str(value) for attr in cls._FIELD_ATTRIBUTES if (value := getattr(info, attr, None)) is not None}
+
+
+def _rootless_xml_elements(root: ElementTree.Element, indent: str | None) -> Iterator[str]:
+    for sub_element in root:
+        if indent is not None:
+            ElementTree.indent(sub_element, space=indent)
+        yield ElementTree.tostring(sub_element, encoding='unicode')
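Beyond the docstring example in the new module above, the `include_field_info` option pulls `title`/`description` metadata from Pydantic `Field`s (or dataclass `field()` metadata) into XML attributes. A sketch of what it does; the `User` model is illustrative, and the output shown is what the diffed code should produce with the default two-space indent:

```python
from pydantic import BaseModel, Field

from pydantic_ai import format_as_xml


class User(BaseModel):
    name: str = Field(description='Full name')
    height: int = Field(title='Height (inches)')


print(format_as_xml(User(name='John', height=72), root_tag='user', include_field_info=True))
'''
<user>
  <name description="Full name">John</name>
  <height title="Height (inches)">72</height>
</user>
'''
```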
{pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/mcp.py

@@ -540,7 +540,7 @@ class MCPServerStdio(MCPServer):
             f'args={self.args!r}',
         ]
         if self.id:
-            repr_args.append(f'id={self.id!r}')
+            repr_args.append(f'id={self.id!r}')  # pragma: lax no cover
         return f'{self.__class__.__name__}({", ".join(repr_args)})'

     def __eq__(self, value: object, /) -> bool:
{pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/messages.py

@@ -126,6 +126,7 @@ class FileUrl(ABC):

     Supported by:
     - `GoogleModel`: `VideoUrl.vendor_metadata` is used as `video_metadata`: https://ai.google.dev/gemini-api/docs/video-understanding#customize-video-processing
+    - `OpenAIChatModel`, `OpenAIResponsesModel`: `ImageUrl.vendor_metadata['detail']` is used as `detail` setting for images
     """

     _media_type: Annotated[str | None, pydantic.Field(alias='media_type', default=None, exclude=True)] = field(
@@ -471,6 +472,7 @@ class BinaryContent:

     Supported by:
     - `GoogleModel`: `BinaryContent.vendor_metadata` is used as `video_metadata`: https://ai.google.dev/gemini-api/docs/video-understanding#customize-video-processing
+    - `OpenAIChatModel`, `OpenAIResponsesModel`: `BinaryContent.vendor_metadata['detail']` is used as `detail` setting for images
     """

     kind: Literal['binary'] = 'binary'
@@ -1161,11 +1163,7 @@ class ModelResponse:
             if settings.include_content and part.content is not None:  # pragma: no branch
                 from .models.instrumented import InstrumentedModel

-                return_part['result'] = (
-                    part.content
-                    if isinstance(part.content, str)
-                    else {k: InstrumentedModel.serialize_any(v) for k, v in part.content.items()}
-                )
+                return_part['result'] = InstrumentedModel.serialize_any(part.content)

             parts.append(return_part)
         return parts
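The two docstring additions document that the OpenAI models now read `vendor_metadata['detail']` for images. A hedged sketch of how that is passed from user code (model name, URL, and prompt are illustrative):

```python
from pydantic_ai import Agent, ImageUrl

agent = Agent('openai:gpt-4o')

result = agent.run_sync(
    [
        'What is in this image?',
        ImageUrl(
            url='https://example.com/photo.jpg',
            # forwarded to OpenAI as the image `detail` setting, per the docstrings above
            vendor_metadata={'detail': 'low'},
        ),
    ]
)
print(result.output)
```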
{pydantic_ai_slim-1.0.9 → pydantic_ai_slim-1.0.11}/pydantic_ai/models/__init__.py

@@ -65,6 +65,8 @@ KnownModelName = TypeAliasType(
         'anthropic:claude-opus-4-20250514',
         'anthropic:claude-sonnet-4-0',
         'anthropic:claude-sonnet-4-20250514',
+        'anthropic:claude-sonnet-4-5',
+        'anthropic:claude-sonnet-4-5-20250929',
         'bedrock:amazon.titan-tg1-large',
         'bedrock:amazon.titan-text-lite-v1',
         'bedrock:amazon.titan-text-express-v1',
@@ -121,23 +123,6 @@ KnownModelName = TypeAliasType(
         'cerebras:qwen-3-32b',
         'cerebras:qwen-3-coder-480b',
         'cerebras:qwen-3-235b-a22b-thinking-2507',
-        'claude-3-5-haiku-20241022',
-        'claude-3-5-haiku-latest',
-        'claude-3-5-sonnet-20240620',
-        'claude-3-5-sonnet-20241022',
-        'claude-3-5-sonnet-latest',
-        'claude-3-7-sonnet-20250219',
-        'claude-3-7-sonnet-latest',
-        'claude-3-haiku-20240307',
-        'claude-3-opus-20240229',
-        'claude-3-opus-latest',
-        'claude-4-opus-20250514',
-        'claude-4-sonnet-20250514',
-        'claude-opus-4-0',
-        'claude-opus-4-1-20250805',
-        'claude-opus-4-20250514',
-        'claude-sonnet-4-0',
-        'claude-sonnet-4-20250514',
         'cohere:c4ai-aya-expanse-32b',
         'cohere:c4ai-aya-expanse-8b',
         'cohere:command',
@@ -163,54 +148,6 @@ KnownModelName = TypeAliasType(
         'google-vertex:gemini-2.5-flash',
         'google-vertex:gemini-2.5-flash-lite',
         'google-vertex:gemini-2.5-pro',
-        'gpt-3.5-turbo',
-        'gpt-3.5-turbo-0125',
-        'gpt-3.5-turbo-0301',
-        'gpt-3.5-turbo-0613',
-        'gpt-3.5-turbo-1106',
-        'gpt-3.5-turbo-16k',
-        'gpt-3.5-turbo-16k-0613',
-        'gpt-4',
-        'gpt-4-0125-preview',
-        'gpt-4-0314',
-        'gpt-4-0613',
-        'gpt-4-1106-preview',
-        'gpt-4-32k',
-        'gpt-4-32k-0314',
-        'gpt-4-32k-0613',
-        'gpt-4-turbo',
-        'gpt-4-turbo-2024-04-09',
-        'gpt-4-turbo-preview',
-        'gpt-4-vision-preview',
-        'gpt-4.1',
-        'gpt-4.1-2025-04-14',
-        'gpt-4.1-mini',
-        'gpt-4.1-mini-2025-04-14',
-        'gpt-4.1-nano',
-        'gpt-4.1-nano-2025-04-14',
-        'gpt-4o',
-        'gpt-4o-2024-05-13',
-        'gpt-4o-2024-08-06',
-        'gpt-4o-2024-11-20',
-        'gpt-4o-audio-preview',
-        'gpt-4o-audio-preview-2024-10-01',
-        'gpt-4o-audio-preview-2024-12-17',
-        'gpt-4o-audio-preview-2025-06-03',
-        'gpt-4o-mini',
-        'gpt-4o-mini-2024-07-18',
-        'gpt-4o-mini-audio-preview',
-        'gpt-4o-mini-audio-preview-2024-12-17',
-        'gpt-4o-mini-search-preview',
-        'gpt-4o-mini-search-preview-2025-03-11',
-        'gpt-4o-search-preview',
-        'gpt-4o-search-preview-2025-03-11',
-        'gpt-5',
-        'gpt-5-2025-08-07',
-        'gpt-5-chat-latest',
-        'gpt-5-mini',
-        'gpt-5-mini-2025-08-07',
-        'gpt-5-nano',
-        'gpt-5-nano-2025-08-07',
         'grok:grok-4',
         'grok:grok-4-0709',
         'grok:grok-3',
@@ -271,22 +208,6 @@ KnownModelName = TypeAliasType(
         'moonshotai:kimi-latest',
         'moonshotai:kimi-thinking-preview',
         'moonshotai:kimi-k2-0711-preview',
-        'o1',
-        'o1-2024-12-17',
-        'o1-mini',
-        'o1-mini-2024-09-12',
-        'o1-preview',
-        'o1-preview-2024-09-12',
-        'o1-pro',
-        'o1-pro-2025-03-19',
-        'o3',
-        'o3-2025-04-16',
-        'o3-deep-research',
-        'o3-deep-research-2025-06-26',
-        'o3-mini',
-        'o3-mini-2025-01-31',
-        'o3-pro',
-        'o3-pro-2025-06-10',
         'openai:chatgpt-4o-latest',
         'openai:codex-mini-latest',
         'openai:gpt-3.5-turbo',