pydantic-ai-slim 0.3.4__tar.gz → 0.3.5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pydantic-ai-slim might be problematic.
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/PKG-INFO +4 -4
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/_agent_graph.py +32 -1
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/_output.py +1 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/_utils.py +16 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/agent.py +24 -14
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/messages.py +26 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/openai.py +4 -1
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/output.py +4 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/google.py +6 -1
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/.gitignore +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/LICENSE +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/README.md +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/__main__.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/_a2a.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/_cli.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/_function_schema.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/_mcp.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/_parts_manager.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/_run_context.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/_thinking_part.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/direct.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/ext/__init__.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/ext/langchain.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/format_as_xml.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/format_prompt.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/mcp.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/__init__.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/anthropic.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/bedrock.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/cohere.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/fallback.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/function.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/gemini.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/google.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/groq.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/instrumented.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/mcp_sampling.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/mistral.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/test.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/__init__.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/_json_schema.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/amazon.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/anthropic.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/cohere.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/deepseek.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/google.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/grok.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/meta.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/mistral.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/openai.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/profiles/qwen.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/__init__.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/anthropic.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/azure.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/cohere.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/fireworks.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/grok.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/groq.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/heroku.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/mistral.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/openai.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/openrouter.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/together.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/result.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/tools.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/usage.py +0 -0
- {pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pyproject.toml +0 -0
{pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.3.4
+Version: 0.3.5
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
 License-Expression: MIT
@@ -30,11 +30,11 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.3.4
+Requires-Dist: pydantic-graph==0.3.5
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
-Requires-Dist: fasta2a==0.3.4; extra == 'a2a'
+Requires-Dist: fasta2a==0.3.5; extra == 'a2a'
 Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.52.0; extra == 'anthropic'
 Provides-Extra: bedrock
@@ -48,7 +48,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == 'cohere'
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.3.4; extra == 'evals'
+Requires-Dist: pydantic-evals==0.3.5; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.15.0; extra == 'google'
 Provides-Extra: groq
{pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/_agent_graph.py

@@ -743,6 +743,30 @@ async def process_function_tools( # noqa C901
         if isinstance(result, _messages.RetryPromptPart):
             results_by_index[index] = result
         elif isinstance(result, _messages.ToolReturnPart):
+            if isinstance(result.content, _messages.ToolReturn):
+                tool_return = result.content
+                if (
+                    isinstance(tool_return.return_value, _messages.MultiModalContentTypes)
+                    or isinstance(tool_return.return_value, list)
+                    and any(
+                        isinstance(content, _messages.MultiModalContentTypes)
+                        for content in tool_return.return_value  # type: ignore
+                    )
+                ):
+                    raise exceptions.UserError(
+                        f"{result.tool_name}'s `return_value` contains invalid nested MultiModalContentTypes objects. "
+                        f'Please use `content` instead.'
+                    )
+                result.content = tool_return.return_value  # type: ignore
+                result.metadata = tool_return.metadata
+                if tool_return.content:
+                    user_parts.append(
+                        _messages.UserPromptPart(
+                            content=list(tool_return.content),
+                            timestamp=result.timestamp,
+                            part_kind='user-prompt',
+                        )
+                    )
             contents: list[Any]
             single_content: bool
             if isinstance(result.content, list):
@@ -754,7 +778,13 @@ async def process_function_tools( # noqa C901
 
             processed_contents: list[Any] = []
             for content in contents:
-                if isinstance(content, _messages.MultiModalContentTypes):
+                if isinstance(content, _messages.ToolReturn):
+                    raise exceptions.UserError(
+                        f"{result.tool_name}'s return contains invalid nested ToolReturn objects. "
+                        f'ToolReturn should be used directly.'
+                    )
+                elif isinstance(content, _messages.MultiModalContentTypes):
+                    # Handle direct multimodal content
                     if isinstance(content, _messages.BinaryContent):
                         identifier = multi_modal_content_identifier(content.data)
                     else:
@@ -769,6 +799,7 @@ async def process_function_tools( # noqa C901
                     )
                     processed_contents.append(f'See file {identifier}')
                 else:
+                    # Handle regular content
                     processed_contents.append(content)
 
             if single_content:
{pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/_output.py

@@ -182,6 +182,7 @@ class OutputSchema(BaseOutputSchema[OutputDataT], ABC):
                     _flatten_output_spec(output_spec.outputs),
                     name=output_spec.name,
                     description=output_spec.description,
+                    strict=output_spec.strict,
                 )
             )
         elif isinstance(output_spec, PromptedOutput):
{pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/_utils.py

@@ -31,6 +31,8 @@ from typing_inspection.introspection import is_union_origin
 
 from pydantic_graph._utils import AbstractSpan
 
+from . import exceptions
+
 AbstractSpan = AbstractSpan
 
 if TYPE_CHECKING:
@@ -415,6 +417,20 @@ def merge_json_schema_defs(schemas: list[dict[str, Any]]) -> tuple[list[dict[str, Any]], dict[str, Any]]:
     return rewritten_schemas, all_defs
 
 
+def validate_empty_kwargs(_kwargs: dict[str, Any]) -> None:
+    """Validate that no unknown kwargs remain after processing.
+
+    Args:
+        _kwargs: Dictionary of remaining kwargs after specific ones have been processed.
+
+    Raises:
+        UserError: If any unknown kwargs remain.
+    """
+    if _kwargs:
+        unknown_kwargs = ', '.join(f'`{k}`' for k in _kwargs.keys())
+        raise exceptions.UserError(f'Unknown keyword arguments: {unknown_kwargs}')
+
+
 def strip_markdown_fences(text: str) -> str:
     if text.startswith('{'):
         return text
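The new `validate_empty_kwargs` helper simply turns any leftover keyword arguments into a `UserError`. A minimal standalone sketch of how it behaves (the dictionary contents are illustrative; the helper lives in the private `pydantic_ai._utils` module):

from pydantic_ai import _utils  # private module; the helper is internal API

_utils.validate_empty_kwargs({})                    # nothing left over: no-op
_utils.validate_empty_kwargs({'result_typo': int})  # raises UserError: Unknown keyword arguments: `result_typo`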
{pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/agent.py

@@ -294,11 +294,11 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         self.name = name
         self.model_settings = model_settings
 
-        if 'result_type' in _deprecated_kwargs:
-            if output_type is not str:
+        if 'result_type' in _deprecated_kwargs:
+            if output_type is not str:  # pragma: no cover
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
             warnings.warn('`result_type` is deprecated, use `output_type` instead', DeprecationWarning)
-            output_type = _deprecated_kwargs['result_type']
+            output_type = _deprecated_kwargs.pop('result_type')
 
         self.output_type = output_type
 
@@ -306,22 +306,22 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
 
         self._deps_type = deps_type
 
-        self._deprecated_result_tool_name = _deprecated_kwargs.get('result_tool_name')
-        if self._deprecated_result_tool_name is not None:
+        self._deprecated_result_tool_name = _deprecated_kwargs.pop('result_tool_name', None)
+        if self._deprecated_result_tool_name is not None:
             warnings.warn(
                 '`result_tool_name` is deprecated, use `output_type` with `ToolOutput` instead',
                 DeprecationWarning,
             )
 
-        self._deprecated_result_tool_description = _deprecated_kwargs.get('result_tool_description')
-        if self._deprecated_result_tool_description is not None:
+        self._deprecated_result_tool_description = _deprecated_kwargs.pop('result_tool_description', None)
+        if self._deprecated_result_tool_description is not None:
             warnings.warn(
                 '`result_tool_description` is deprecated, use `output_type` with `ToolOutput` instead',
                 DeprecationWarning,
             )
-        result_retries = _deprecated_kwargs.get('result_retries')
-        if result_retries is not None:
-            if output_retries is not None:
+        result_retries = _deprecated_kwargs.pop('result_retries', None)
+        if result_retries is not None:
+            if output_retries is not None:  # pragma: no cover
                 raise TypeError('`output_retries` and `result_retries` cannot be set at the same time.')
             warnings.warn('`result_retries` is deprecated, use `max_result_retries` instead', DeprecationWarning)
             output_retries = result_retries
@@ -329,6 +329,8 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         default_output_mode = (
             self.model.profile.default_structured_output_mode if isinstance(self.model, models.Model) else None
         )
+        _utils.validate_empty_kwargs(_deprecated_kwargs)
+
         self._output_schema = _output.OutputSchema[OutputDataT].build(
             output_type,
             default_mode=default_output_mode,
@@ -469,7 +471,9 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
             warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
-            output_type = _deprecated_kwargs['result_type']
+            output_type = _deprecated_kwargs.pop('result_type')
+
+        _utils.validate_empty_kwargs(_deprecated_kwargs)
 
         async with self.iter(
             user_prompt=user_prompt,
@@ -635,7 +639,9 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
             warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
-            output_type = _deprecated_kwargs['result_type']
+            output_type = _deprecated_kwargs.pop('result_type')
+
+        _utils.validate_empty_kwargs(_deprecated_kwargs)
 
         deps = self._get_deps(deps)
         new_message_index = len(message_history) if message_history else 0
@@ -872,7 +878,9 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
             warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
-            output_type = _deprecated_kwargs['result_type']
+            output_type = _deprecated_kwargs.pop('result_type')
+
+        _utils.validate_empty_kwargs(_deprecated_kwargs)
 
         return get_event_loop().run_until_complete(
             self.run(
@@ -988,7 +996,9 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
             warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
-            output_type = _deprecated_kwargs['result_type']
+            output_type = _deprecated_kwargs.pop('result_type')
+
+        _utils.validate_empty_kwargs(_deprecated_kwargs)
 
         yielded = False
         async with self.iter(
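Taken together, the constructor and the run paths now pop each deprecated keyword and then call `validate_empty_kwargs`, so a misspelled or unsupported keyword fails loudly instead of being silently ignored. A hedged sketch of the resulting behaviour (model name and types are illustrative, not from the package docs):

from pydantic_ai import Agent
from pydantic_ai.exceptions import UserError

agent = Agent('test', result_type=int)  # still accepted, but emits a DeprecationWarning and pops the kwarg

try:
    Agent('test', result_typo=int)  # an unknown keyword is no longer swallowed
except UserError as exc:
    print(exc)  # Unknown keyword arguments: `result_typo`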
{pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/messages.py

@@ -306,6 +306,29 @@ class BinaryContent:
 
 UserContent: TypeAlias = 'str | ImageUrl | AudioUrl | DocumentUrl | VideoUrl | BinaryContent'
 
+
+@dataclass(repr=False)
+class ToolReturn:
+    """A structured return value for tools that need to provide both a return value and custom content to the model.
+
+    This class allows tools to return complex responses that include:
+    - A return value for actual tool return
+    - Custom content (including multi-modal content) to be sent to the model as a UserPromptPart
+    - Optional metadata for application use
+    """
+
+    return_value: Any
+    """The return value to be used in the tool response."""
+
+    content: Sequence[UserContent] | None = None
+    """The content sequence to be sent to the model as a UserPromptPart."""
+
+    metadata: Any = None
+    """Additional data that can be accessed programmatically by the application but is not sent to the LLM."""
+
+    __repr__ = _utils.dataclasses_no_defaults_repr
+
+
 # Ideally this would be a Union of types, but Python 3.9 requires it to be a string, and strings don't work with `isinstance``.
 MultiModalContentTypes = (ImageUrl, AudioUrl, DocumentUrl, VideoUrl, BinaryContent)
 _document_format_lookup: dict[str, DocumentFormat] = {
@@ -396,6 +419,9 @@ class ToolReturnPart:
     tool_call_id: str
     """The tool call identifier, this is used by some models including OpenAI."""
 
+    metadata: Any = None
+    """Additional data that can be accessed programmatically by the application but is not sent to the LLM."""
+
     timestamp: datetime = field(default_factory=_now_utc)
     """The timestamp, when the tool returned."""
 
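Based on the dataclass above, a tool can now return a `ToolReturn` that separates the value reported back for the tool call (`return_value`), extra content forwarded to the model as a `UserPromptPart` (`content`), and data kept only for the application (`metadata`, which lands on the new `ToolReturnPart.metadata` field). A minimal sketch; the tool name, URL, and metadata keys are made up for illustration:

from pydantic_ai import Agent
from pydantic_ai.messages import ImageUrl, ToolReturn

agent = Agent('test')

@agent.tool_plain
def take_screenshot() -> ToolReturn:
    return ToolReturn(
        return_value='screenshot captured',  # what the model sees as the tool result
        content=['Here is the screenshot:', ImageUrl(url='https://example.com/shot.png')],  # sent as a UserPromptPart
        metadata={'width': 1024, 'height': 768},  # available to the application, never sent to the LLM
    )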
{pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/models/openai.py

@@ -644,13 +644,16 @@ class OpenAIResponsesModel(Model):
         """Process a non-streamed response, and prepare a message to return."""
         timestamp = number_to_datetime(response.created_at)
         items: list[ModelResponsePart] = []
-        items.append(TextPart(response.output_text))
         for item in response.output:
             if item.type == 'reasoning':
                 for summary in item.summary:
                     # NOTE: We use the same id for all summaries because we can merge them on the round trip.
                     # The providers don't force the signature to be unique.
                     items.append(ThinkingPart(content=summary.text, id=item.id))
+            elif item.type == 'message':
+                for content in item.content:
+                    if content.type == 'output_text':  # pragma: no branch
+                        items.append(TextPart(content.text))
             elif item.type == 'function_call':
                 items.append(ToolCallPart(item.name, item.arguments, tool_call_id=item.call_id))
         return ModelResponse(
{pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/output.py

@@ -154,6 +154,8 @@ class NativeOutput(Generic[OutputDataT]):
     """The name of the structured output that will be passed to the model. If not specified and only one output is provided, the name of the output type or function will be used."""
     description: str | None
     """The description of the structured output that will be passed to the model. If not specified and only one output is provided, the docstring of the output type or function will be used."""
+    strict: bool | None
+    """Whether to use strict mode for the output, if the model supports it."""
 
     def __init__(
         self,
@@ -161,10 +163,12 @@ class NativeOutput(Generic[OutputDataT]):
         *,
         name: str | None = None,
         description: str | None = None,
+        strict: bool | None = None,
     ):
         self.outputs = outputs
         self.name = name
         self.description = description
+        self.strict = strict
 
 
 @dataclass(init=False)
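With the new `strict` field, strict structured-output mode can be requested when wrapping an output type in `NativeOutput`, and the `_output.py` change above passes it through when the output schema is built; whether it has any effect depends on the model. A small sketch, assuming `NativeOutput` accepts a single Pydantic model type as its `outputs` argument (model name and types are illustrative):

from pydantic import BaseModel
from pydantic_ai import Agent
from pydantic_ai.output import NativeOutput

class CityLocation(BaseModel):
    city: str
    country: str

agent = Agent('openai:gpt-4o', output_type=NativeOutput(CityLocation, strict=True))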
{pydantic_ai_slim-0.3.4 → pydantic_ai_slim-0.3.5}/pydantic_ai/providers/google.py

@@ -104,7 +104,12 @@ class GoogleProvider(Provider[genai.Client]):
            self._client = genai.Client(
                vertexai=vertexai,
                project=project or os.environ.get('GOOGLE_CLOUD_PROJECT'),
-               location=location or os.environ.get('GOOGLE_CLOUD_LOCATION'),
+               # From https://github.com/pydantic/pydantic-ai/pull/2031/files#r2169682149:
+               # Currently `us-central1` supports the most models by far of any region including `global`, but not
+               # all of them. `us-central1` has all google models but is missing some Anthropic partner models,
+               # which use `us-east5` instead. `global` has fewer models but higher availability.
+               # For more details, check: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations#available-regions
+               location=location or os.environ.get('GOOGLE_CLOUD_LOCATION') or 'us-central1',
                credentials=credentials,
                http_options={'headers': {'User-Agent': get_user_agent()}},
            )
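The provider now defaults the Vertex AI location to `us-central1` when neither the `location` argument nor the `GOOGLE_CLOUD_LOCATION` environment variable is set. Callers who need a different region (for example `us-east5` for the Anthropic partner models mentioned in the comment) can still pass it explicitly; a sketch, assuming Vertex AI credentials are already configured and using an illustrative project name:

from pydantic_ai.providers.google import GoogleProvider

provider = GoogleProvider(vertexai=True, project='my-gcp-project', location='us-east5')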
All other files in the package are unchanged.