pydantic-ai-slim 0.3.1-py3-none-any.whl → 0.3.3-py3-none-any.whl
This diff shows the contents of publicly available package versions as published to their public registry and is provided for informational purposes only.
This release of pydantic-ai-slim has been flagged as potentially problematic.
- pydantic_ai/__init__.py +5 -2
- pydantic_ai/_agent_graph.py +33 -15
- pydantic_ai/_cli.py +7 -3
- pydantic_ai/_function_schema.py +1 -4
- pydantic_ai/_mcp.py +123 -0
- pydantic_ai/_output.py +654 -159
- pydantic_ai/_run_context.py +56 -0
- pydantic_ai/_system_prompt.py +2 -1
- pydantic_ai/_utils.py +111 -1
- pydantic_ai/agent.py +66 -35
- pydantic_ai/mcp.py +144 -115
- pydantic_ai/models/__init__.py +21 -2
- pydantic_ai/models/function.py +21 -3
- pydantic_ai/models/gemini.py +27 -4
- pydantic_ai/models/google.py +29 -4
- pydantic_ai/models/mcp_sampling.py +95 -0
- pydantic_ai/models/mistral.py +5 -1
- pydantic_ai/models/openai.py +70 -9
- pydantic_ai/models/test.py +1 -1
- pydantic_ai/models/wrapper.py +6 -0
- pydantic_ai/output.py +288 -0
- pydantic_ai/profiles/__init__.py +21 -0
- pydantic_ai/profiles/_json_schema.py +1 -1
- pydantic_ai/profiles/google.py +6 -2
- pydantic_ai/profiles/openai.py +5 -0
- pydantic_ai/result.py +52 -26
- pydantic_ai/settings.py +1 -0
- pydantic_ai/tools.py +2 -47
- {pydantic_ai_slim-0.3.1.dist-info → pydantic_ai_slim-0.3.3.dist-info}/METADATA +4 -4
- {pydantic_ai_slim-0.3.1.dist-info → pydantic_ai_slim-0.3.3.dist-info}/RECORD +33 -29
- {pydantic_ai_slim-0.3.1.dist-info → pydantic_ai_slim-0.3.3.dist-info}/WHEEL +0 -0
- {pydantic_ai_slim-0.3.1.dist-info → pydantic_ai_slim-0.3.3.dist-info}/entry_points.txt +0 -0
- {pydantic_ai_slim-0.3.1.dist-info → pydantic_ai_slim-0.3.3.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/profiles/__init__.py
CHANGED
@@ -1,10 +1,12 @@
 from __future__ import annotations as _annotations
 
 from dataclasses import dataclass, fields, replace
+from textwrap import dedent
 from typing import Callable, Union
 
 from typing_extensions import Self
 
+from ..output import StructuredOutputMode
 from ._json_schema import JsonSchemaTransformer
 
 
@@ -12,7 +14,26 @@ from ._json_schema import JsonSchemaTransformer
 class ModelProfile:
     """Describes how requests to a specific model or family of models need to be constructed to get the best results, independent of the model and provider classes used."""
 
+    supports_tools: bool = True
+    """Whether the model supports tools."""
+    supports_json_schema_output: bool = False
+    """Whether the model supports JSON schema output."""
+    supports_json_object_output: bool = False
+    """Whether the model supports JSON object output."""
+    default_structured_output_mode: StructuredOutputMode = 'tool'
+    """The default structured output mode to use for the model."""
+    prompted_output_template: str = dedent(
+        """
+        Always respond with a JSON object that's compatible with this schema:
+
+        {schema}
+
+        Don't include any text or Markdown fencing before or after.
+        """
+    )
+    """The instructions template to use for prompted structured output. The '{schema}' placeholder will be replaced with the JSON schema for the output."""
     json_schema_transformer: type[JsonSchemaTransformer] | None = None
+    """The transformer to use to make JSON schemas for tools and structured output compatible with the model."""
 
     @classmethod
     def from_profile(cls, profile: ModelProfile | None) -> Self:
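
To make the new fields concrete, here is a minimal sketch (not part of the diff) of how a ModelProfile with the new structured-output capability flags could be constructed and inspected; it only uses field names and defaults visible in the hunk above, plus the 'native' mode value referenced in the OpenAI profile comment further down.

# Minimal sketch, assuming pydantic-ai-slim 0.3.3 is installed.
from pydantic_ai.profiles import ModelProfile

# Defaults as declared in the hunk above.
default_profile = ModelProfile()
assert default_profile.supports_tools is True
assert default_profile.default_structured_output_mode == 'tool'

# Hypothetical profile for a model that can emit schema-constrained JSON natively.
json_capable = ModelProfile(
    supports_json_schema_output=True,
    supports_json_object_output=True,
    default_structured_output_mode='native',  # mode name referenced in the OpenAI profile comment
)
print(json_capable.prompted_output_template)  # template containing the '{schema}' placeholder
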
pydantic_ai/profiles/google.py
CHANGED
@@ -10,7 +10,11 @@ from ._json_schema import JsonSchema, JsonSchemaTransformer
 
 def google_model_profile(model_name: str) -> ModelProfile | None:
     """Get the model profile for a Google model."""
-    return ModelProfile(json_schema_transformer=GoogleJsonSchemaTransformer)
+    return ModelProfile(
+        json_schema_transformer=GoogleJsonSchemaTransformer,
+        supports_json_schema_output=True,
+        supports_json_object_output=True,
+    )
 
 
 class GoogleJsonSchemaTransformer(JsonSchemaTransformer):
@@ -47,7 +51,7 @@ class GoogleJsonSchemaTransformer(JsonSchemaTransformer):
         schema.pop('title', None)
         schema.pop('default', None)
         schema.pop('$schema', None)
-        if (const := schema.pop('const', None)) is not None:
+        if (const := schema.pop('const', None)) is not None:
             # Gemini doesn't support const, but it does support enum with a single value
             schema['enum'] = [const]
         schema.pop('discriminator', None)
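
In practice (a hedged sketch, assuming pydantic-ai-slim 0.3.3), the change above means the Gemini profile now advertises native JSON output support alongside its schema transformer:

from pydantic_ai.profiles.google import google_model_profile

profile = google_model_profile('gemini-1.5-flash')  # model name is just an example
assert profile is not None
print(profile.supports_json_schema_output)    # True
print(profile.supports_json_object_output)    # True
print(profile.default_structured_output_mode)  # 'tool' (the unchanged default)
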
pydantic_ai/profiles/openai.py
CHANGED
@@ -25,8 +25,13 @@ class OpenAIModelProfile(ModelProfile):
 def openai_model_profile(model_name: str) -> ModelProfile:
     """Get the model profile for an OpenAI model."""
     is_reasoning_model = model_name.startswith('o')
+    # Structured Outputs (output mode 'native') is only supported with the gpt-4o-mini, gpt-4o-mini-2024-07-18, and gpt-4o-2024-08-06 model snapshots and later.
+    # We leave it in here for all models because the `default_structured_output_mode` is `'tool'`, so `native` is only used
+    # when the user specifically uses the `NativeOutput` marker, so an error from the API is acceptable.
     return OpenAIModelProfile(
         json_schema_transformer=OpenAIJsonSchemaTransformer,
+        supports_json_schema_output=True,
+        supports_json_object_output=True,
         openai_supports_sampling_settings=not is_reasoning_model,
     )
 
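
The comment above refers to the NativeOutput marker; the sketch below shows roughly how that opt-in looks from user code. The import location and exact Agent call shape are assumptions based on the 0.3.x public API, not taken from this diff.

from pydantic import BaseModel

from pydantic_ai import Agent
from pydantic_ai.output import NativeOutput  # assumed import path for the marker


class CityInfo(BaseModel):
    city: str
    country: str


# Explicitly opt into OpenAI's native Structured Outputs instead of the 'tool' default.
agent = Agent('openai:gpt-4o', output_type=NativeOutput(CityInfo))
result = agent.run_sync('Tell me about Paris')
print(result.output)  # a validated CityInfo instance
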
pydantic_ai/result.py
CHANGED
@@ -5,24 +5,35 @@ from collections.abc import AsyncIterable, AsyncIterator, Awaitable, Callable
 from copy import copy
 from dataclasses import dataclass, field
 from datetime import datetime
-from typing import Generic
+from typing import Generic
 
-from
+from pydantic import ValidationError
+from typing_extensions import TypeVar, deprecated, overload
 
-from . import
+from . import _utils, exceptions, messages as _messages, models
 from ._output import (
-    OutputDataT,
     OutputDataT_inv,
     OutputSchema,
     OutputValidator,
     OutputValidatorFunc,
-
+    PlainTextOutputSchema,
+    TextOutputSchema,
+    ToolOutputSchema,
 )
+from ._run_context import AgentDepsT, RunContext
 from .messages import AgentStreamEvent, FinalResultEvent
-from .
+from .output import (
+    OutputDataT,
+    ToolOutput,
+)
 from .usage import Usage, UsageLimits
 
-__all__ =
+__all__ = (
+    'OutputDataT',
+    'OutputDataT_inv',
+    'ToolOutput',
+    'OutputValidatorFunc',
+)
 
 
 T = TypeVar('T')
@@ -32,7 +43,7 @@ T = TypeVar('T')
 @dataclass
 class AgentStream(Generic[AgentDepsT, OutputDataT]):
     _raw_stream_response: models.StreamedResponse
-    _output_schema: OutputSchema[OutputDataT]
+    _output_schema: OutputSchema[OutputDataT]
     _output_validators: list[OutputValidator[AgentDepsT, OutputDataT]]
     _run_ctx: RunContext[AgentDepsT]
     _usage_limits: UsageLimits | None
@@ -80,7 +91,7 @@ class AgentStream(Generic[AgentDepsT, OutputDataT]):
    ) -> OutputDataT:
        """Validate a structured result message."""
        call = None
-       if self._output_schema
+       if isinstance(self._output_schema, ToolOutputSchema) and output_tool_name is not None:
            match = self._output_schema.find_named_tool(message.parts, output_tool_name)
            if match is None:
                raise exceptions.UnexpectedModelBehavior(  # pragma: no cover
@@ -91,10 +102,16 @@ class AgentStream(Generic[AgentDepsT, OutputDataT]):
            result_data = await output_tool.process(
                call, self._run_ctx, allow_partial=allow_partial, wrap_validation_errors=False
            )
-
+       elif isinstance(self._output_schema, TextOutputSchema):
            text = '\n\n'.join(x.content for x in message.parts if isinstance(x, _messages.TextPart))
-
-           result_data =
+
+           result_data = await self._output_schema.process(
+               text, self._run_ctx, allow_partial=allow_partial, wrap_validation_errors=False
+           )
+       else:
+           raise exceptions.UnexpectedModelBehavior(  # pragma: no cover
+               'Invalid response, unable to process text output'
+           )
 
        for validator in self._output_validators:
            result_data = await validator.validate(result_data, call, self._run_ctx)
@@ -117,14 +134,12 @@ class AgentStream(Generic[AgentDepsT, OutputDataT]):
            """Return an appropriate FinalResultEvent if `e` corresponds to a part that will produce a final result."""
            if isinstance(e, _messages.PartStartEvent):
                new_part = e.part
-               if isinstance(new_part, _messages.ToolCallPart):
-
-
-
-
-
-               elif _output.allow_text_output(output_schema):  # pragma: no branch
-                   assert_type(e, _messages.PartStartEvent)
+               if isinstance(new_part, _messages.ToolCallPart) and isinstance(output_schema, ToolOutputSchema):
+                   for call, _ in output_schema.find_tool([new_part]):  # pragma: no branch
+                       return _messages.FinalResultEvent(tool_name=call.tool_name, tool_call_id=call.tool_call_id)
+               elif isinstance(new_part, _messages.TextPart) and isinstance(
+                   output_schema, TextOutputSchema
+               ):  # pragma: no branch
                    return _messages.FinalResultEvent(tool_name=None, tool_call_id=None)
 
        usage_checking_stream = _get_usage_checking_stream_response(
@@ -155,7 +170,7 @@ class StreamedRunResult(Generic[AgentDepsT, OutputDataT]):
 
    _usage_limits: UsageLimits | None
    _stream_response: models.StreamedResponse
-   _output_schema: OutputSchema[OutputDataT]
+   _output_schema: OutputSchema[OutputDataT]
    _run_ctx: RunContext[AgentDepsT]
    _output_validators: list[OutputValidator[AgentDepsT, OutputDataT]]
    _output_tool_name: str | None
@@ -296,7 +311,11 @@ class StreamedRunResult(Generic[AgentDepsT, OutputDataT]):
            An async iterable of the response data.
        """
        async for structured_message, is_last in self.stream_structured(debounce_by=debounce_by):
-
+           try:
+               yield await self.validate_structured_output(structured_message, allow_partial=not is_last)
+           except ValidationError:
+               if is_last:
+                   raise  # pragma: lax no cover
 
    async def stream_text(self, *, delta: bool = False, debounce_by: float | None = 0.1) -> AsyncIterator[str]:
        """Stream the text result as an async iterable.
@@ -311,7 +330,7 @@ class StreamedRunResult(Generic[AgentDepsT, OutputDataT]):
        Debouncing is particularly important for long structured responses to reduce the overhead of
        performing validation as each token is received.
        """
-       if
+       if not isinstance(self._output_schema, PlainTextOutputSchema):
            raise exceptions.UserError('stream_text() can only be used with text responses')
 
        if delta:
@@ -390,7 +409,7 @@
    ) -> OutputDataT:
        """Validate a structured result message."""
        call = None
-       if self._output_schema
+       if isinstance(self._output_schema, ToolOutputSchema) and self._output_tool_name is not None:
            match = self._output_schema.find_named_tool(message.parts, self._output_tool_name)
            if match is None:
                raise exceptions.UnexpectedModelBehavior(  # pragma: no cover
@@ -401,9 +420,16 @@
            result_data = await output_tool.process(
                call, self._run_ctx, allow_partial=allow_partial, wrap_validation_errors=False
            )
-
+       elif isinstance(self._output_schema, TextOutputSchema):
            text = '\n\n'.join(x.content for x in message.parts if isinstance(x, _messages.TextPart))
-
+
+           result_data = await self._output_schema.process(
+               text, self._run_ctx, allow_partial=allow_partial, wrap_validation_errors=False
+           )
+       else:
+           raise exceptions.UnexpectedModelBehavior(  # pragma: no cover
+               'Invalid response, unable to process text output'
+           )
 
        for validator in self._output_validators:
            result_data = await validator.validate(result_data, call, self._run_ctx)  # pragma: no cover
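
Two user-visible effects of the result.py changes, shown as a hedged sketch using the standard pydantic_ai streaming API (not taken from this diff): partial validation errors are now swallowed until the final streamed message, and stream_text() is rejected unless the run's output schema is plain text.

import asyncio

from pydantic_ai import Agent


async def main() -> None:
    agent = Agent('openai:gpt-4o')  # default output_type is str, i.e. plain text

    async with agent.run_stream('Write a haiku about package diffs') as result:
        async for chunk in result.stream_text():
            print(chunk)

    # With a structured output_type, result.stream_text() would instead raise
    # UserError('stream_text() can only be used with text responses'),
    # per the PlainTextOutputSchema check added above.


asyncio.run(main())
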
pydantic_ai/settings.py
CHANGED
pydantic_ai/tools.py
CHANGED
@@ -4,7 +4,7 @@ import dataclasses
 import json
 from collections.abc import Awaitable, Sequence
 from dataclasses import dataclass, field
-from typing import
+from typing import Any, Callable, Generic, Literal, Union
 
 from opentelemetry.trace import Tracer
 from pydantic import ValidationError
@@ -13,12 +13,9 @@ from pydantic_core import SchemaValidator, core_schema
 from typing_extensions import Concatenate, ParamSpec, Self, TypeAlias, TypeVar
 
 from . import _function_schema, _utils, messages as _messages
+from ._run_context import AgentDepsT, RunContext
 from .exceptions import ModelRetry, UnexpectedModelBehavior
 
-if TYPE_CHECKING:
-    from .models import Model
-    from .result import Usage
-
 __all__ = (
     'AgentDepsT',
     'DocstringFormat',
@@ -35,48 +32,6 @@ __all__ = (
     'ToolDefinition',
 )
 
-AgentDepsT = TypeVar('AgentDepsT', default=None, contravariant=True)
-"""Type variable for agent dependencies."""
-
-
-@dataclasses.dataclass(repr=False)
-class RunContext(Generic[AgentDepsT]):
-    """Information about the current call."""
-
-    deps: AgentDepsT
-    """Dependencies for the agent."""
-    model: Model
-    """The model used in this run."""
-    usage: Usage
-    """LLM usage associated with the run."""
-    prompt: str | Sequence[_messages.UserContent] | None
-    """The original user prompt passed to the run."""
-    messages: list[_messages.ModelMessage] = field(default_factory=list)
-    """Messages exchanged in the conversation so far."""
-    tool_call_id: str | None = None
-    """The ID of the tool call."""
-    tool_name: str | None = None
-    """Name of the tool being called."""
-    retry: int = 0
-    """Number of retries so far."""
-    run_step: int = 0
-    """The current step in the run."""
-
-    def replace_with(
-        self,
-        retry: int | None = None,
-        tool_name: str | None | _utils.Unset = _utils.UNSET,
-    ) -> RunContext[AgentDepsT]:
-        # Create a new `RunContext` a new `retry` value and `tool_name`.
-        kwargs = {}
-        if retry is not None:
-            kwargs['retry'] = retry
-        if tool_name is not _utils.UNSET:  # pragma: no branch
-            kwargs['tool_name'] = tool_name
-        return dataclasses.replace(self, **kwargs)
-
-    __repr__ = _utils.dataclasses_no_defaults_repr
-
 
 ToolParams = ParamSpec('ToolParams', default=...)
 """Retrieval function param spec."""
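
RunContext and AgentDepsT now live in the new private _run_context module, but the diff re-imports them into pydantic_ai.tools, so existing tool code keeps working. A small sketch of typical usage (assumed, not taken from this diff):

from pydantic_ai import Agent
from pydantic_ai.tools import RunContext  # still importable from here after the move

agent = Agent('openai:gpt-4o', deps_type=str)


@agent.tool
def greet(ctx: RunContext[str], name: str) -> str:
    """Greet someone, using the run's string dependency as a prefix."""
    return f'{ctx.deps} {name}'
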
{pydantic_ai_slim-0.3.1.dist-info → pydantic_ai_slim-0.3.3.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.3.1
+Version: 0.3.3
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
 License-Expression: MIT
@@ -30,11 +30,11 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.3.1
+Requires-Dist: pydantic-graph==0.3.3
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
-Requires-Dist: fasta2a==0.3.1; extra == 'a2a'
+Requires-Dist: fasta2a==0.3.3; extra == 'a2a'
 Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.52.0; extra == 'anthropic'
 Provides-Extra: bedrock
@@ -48,7 +48,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.3.1; extra == 'evals'
+Requires-Dist: pydantic-evals==0.3.3; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.15.0; extra == 'google'
 Provides-Extra: groq
{pydantic_ai_slim-0.3.1.dist-info → pydantic_ai_slim-0.3.3.dist-info}/RECORD
CHANGED
@@ -1,57 +1,61 @@
-pydantic_ai/__init__.py,sha256=
+pydantic_ai/__init__.py,sha256=Ns04g4Efqkzwccs8w2nGphfWbptMlIJYG8vIJbGGyG0,1262
 pydantic_ai/__main__.py,sha256=Q_zJU15DUA01YtlJ2mnaLCoId2YmgmreVEERGuQT-Y0,132
 pydantic_ai/_a2a.py,sha256=8nNtx6GENDt2Ej3f1ui9L-FuNQBYVELpJFfwz-y7fUw,7234
-pydantic_ai/_agent_graph.py,sha256=
-pydantic_ai/_cli.py,sha256=
-pydantic_ai/_function_schema.py,sha256=
+pydantic_ai/_agent_graph.py,sha256=DzkPJu4IUNYfUe2jCKAKVMELSVs9lTFFhm2yCHglQu8,40286
+pydantic_ai/_cli.py,sha256=R-sE-9gYqPxV5-5utso4g-bzAKMiTCdo33XOVqE0ZEg,13206
+pydantic_ai/_function_schema.py,sha256=TP9Y1wlN7tRHTDhkJ1IopJHPWpSbxJCxS3zxCb0dpK4,10806
 pydantic_ai/_griffe.py,sha256=Sf_DisE9k2TA0VFeVIK2nf1oOct5MygW86PBCACJkFA,5244
-pydantic_ai/
+pydantic_ai/_mcp.py,sha256=PuvwnlLjv7YYOa9AZJCrklevBug99zGMhwJCBGG7BHQ,5626
+pydantic_ai/_output.py,sha256=PdYdIe9Xg0CiASGnaFeLVP3H4einbTK00syRfXO9BIg,33117
 pydantic_ai/_parts_manager.py,sha256=Lioi8b7Nfyax09yQu8jTkMzxd26dYDrdAqhYvjRSKqQ,16182
-pydantic_ai/
+pydantic_ai/_run_context.py,sha256=zNkSyiQSH-YweO39ii3iB2taouUOodo3sTjz2Lrj4Pc,1792
+pydantic_ai/_system_prompt.py,sha256=lUSq-gDZjlYTGtd6BUm54yEvTIvgdwBmJ8mLsNZZtYU,1142
 pydantic_ai/_thinking_part.py,sha256=mzx2RZSfiQxAKpljEflrcXRXmFKxtp6bKVyorY3UYZk,1554
-pydantic_ai/_utils.py,sha256=
-pydantic_ai/agent.py,sha256=
+pydantic_ai/_utils.py,sha256=JKjM3YIgn2CslJcQKxDcq5nWV57TUTS23x8t3u6v_wA,15025
+pydantic_ai/agent.py,sha256=9ltolQE5aHLJEIhBgp0eBrb1aAL9KZgveHu56Qf-JN8,95521
 pydantic_ai/direct.py,sha256=tXRcQ3fMkykaawO51VxnSwQnqcEmu1LhCy7U9gOyM-g,7768
 pydantic_ai/exceptions.py,sha256=IdFw594Ou7Vn4YFa7xdZ040_j_6nmyA3MPANbC7sys4,3175
 pydantic_ai/format_as_xml.py,sha256=IINfh1evWDphGahqHNLBArB5dQ4NIqS3S-kru35ztGg,372
 pydantic_ai/format_prompt.py,sha256=qdKep95Sjlr7u1-qag4JwPbjoURbG0GbeU_l5ODTNw4,4466
-pydantic_ai/mcp.py,sha256=
+pydantic_ai/mcp.py,sha256=MSLNFiC38A1e5W7K8gQdnh0_DNLqD_AdzW064U83FJ8,21836
 pydantic_ai/messages.py,sha256=7GtSO0x3nypiIZZOiAf72woeTqhuf7W21-jbYS582RQ,36300
+pydantic_ai/output.py,sha256=L05UoIwLbQ2LWvk0TuQrAdbbNdWSP6xZt10l61ORVfI,9147
 pydantic_ai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pydantic_ai/result.py,sha256=
-pydantic_ai/settings.py,sha256=
-pydantic_ai/tools.py,sha256=
+pydantic_ai/result.py,sha256=jCnM_KDX9383OoPy4L8YtPmhmlZkJkCufbq2CGxbgK4,25437
+pydantic_ai/settings.py,sha256=yuUZ7-GkdPB-Gbx71kSdh8dSr6gwM9gEwk84qNxPO_I,3552
+pydantic_ai/tools.py,sha256=aIJTWU9LXgigCFoaC3TyJ38-Z5MLI9GaJHt5oYUjGac,17012
 pydantic_ai/usage.py,sha256=35YPmItlzfNOwP35Rhh0qBUOlg5On5rUE7xqHQWrpaU,5596
 pydantic_ai/common_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/common_tools/duckduckgo.py,sha256=Ty9tu1rCwMfGKgz1JAaC2q_4esmL6QvpkHQUN8F0Ecc,2152
 pydantic_ai/common_tools/tavily.py,sha256=Q1xxSF5HtXAaZ10Pp-OaDOHXwJf2mco9wScGEQXD7E4,2495
 pydantic_ai/ext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/ext/langchain.py,sha256=TI8B6eBjEGKFfvwyLgC_-0eeba4hDJq7wLZ0OZhbiWw,1967
-pydantic_ai/models/__init__.py,sha256=
+pydantic_ai/models/__init__.py,sha256=baO_rIg9sr_AQVj2Qbz7fgdA-R4eDXriwtkV1u-mS58,29012
 pydantic_ai/models/anthropic.py,sha256=s7yvNObBfS-gcXLT0vU8UXjLHITsbr5kkXgP1SYkPms,23832
 pydantic_ai/models/bedrock.py,sha256=67qf_mFnx0kfmKoI96zLOAUn3P47PxPqMrQsaYUrJJ0,29120
 pydantic_ai/models/cohere.py,sha256=UU04-_O-KLgC4DUpM-g4FBPoTOatbmVJJ7mkZNBGsbQ,12626
 pydantic_ai/models/fallback.py,sha256=idOYGMo3CZzpCBT8DDiuPAAgnV2jzluDUq3ESb3KteM,4981
-pydantic_ai/models/function.py,sha256=
-pydantic_ai/models/gemini.py,sha256=
-pydantic_ai/models/google.py,sha256=
+pydantic_ai/models/function.py,sha256=nfCjRmbcF7sdK_nsak1fvzz9Xkptx5WhsxvWdB02zec,12113
+pydantic_ai/models/gemini.py,sha256=r_DWEmaMvP55duanTLI2SvKC3hAqnDAm2W0-UwLYShI,38485
+pydantic_ai/models/google.py,sha256=fbMG5VEjxHIeoP9JXBgymQtfB8gVnzDU2TTey5FMbbA,23396
 pydantic_ai/models/groq.py,sha256=lojKRdvg0p-EtZ20Z2CS4I0goq4CoGkLj3LuYHA6o-I,18497
 pydantic_ai/models/instrumented.py,sha256=vVq7mS071EXS2PZ3NJ4Zgt93iQgAscFr2dyg9fAeuCE,15703
-pydantic_ai/models/
-pydantic_ai/models/
-pydantic_ai/models/
-pydantic_ai/models/
-pydantic_ai/
-pydantic_ai/profiles/
+pydantic_ai/models/mcp_sampling.py,sha256=dWPJjez_aghF_JVGGCd4nZjlnmxYGLFtUS0vlzfRFyk,3398
+pydantic_ai/models/mistral.py,sha256=Ev2zK2UWh4NrRpc2LQE0LNZFBSznNvZ1mkh4_4VcRMg,30514
+pydantic_ai/models/openai.py,sha256=NUE2WroS0BSxLWlfi-RFGTp0k0iv5zb2LWXQ7-Uz9Hw,53016
+pydantic_ai/models/test.py,sha256=STNd79ZoCyyphm0eFRNDoTpvkOzhw1qFw1zgv44kqsg,17441
+pydantic_ai/models/wrapper.py,sha256=2g06TxE5kFqfaJCwsDJHp7Rltoj0XXH0OzdpRDOcqNo,1861
+pydantic_ai/profiles/__init__.py,sha256=BXMqUpgRfosmYgcxjKAI9ESCj47JTSa30DhKXEgVLzM,2419
+pydantic_ai/profiles/_json_schema.py,sha256=sTNHkaK0kbwmbldZp9JRGQNax0f5Qvwy0HkWuu_nGxU,7179
 pydantic_ai/profiles/amazon.py,sha256=O4ijm1Lpz01vaSiHrkSeGQhbCKV5lyQVtHYqh0pCW_k,339
 pydantic_ai/profiles/anthropic.py,sha256=DtTGh85tbkTrrrn2OrJ4FJKXWUIxUH_1Vw6y5fyMRyM,222
 pydantic_ai/profiles/cohere.py,sha256=lcL34Ht1jZopwuqoU6OV9l8vN4zwF-jiPjlsEABbSRo,215
 pydantic_ai/profiles/deepseek.py,sha256=DS_idprnXpMliKziKF0k1neLDJOwUvpatZ3YLaiYnCM,219
-pydantic_ai/profiles/google.py,sha256=
+pydantic_ai/profiles/google.py,sha256=DJ0otpkCgVIrjwV2lzAUAejw8ivwZT9pNAY_sGRcrVg,4891
 pydantic_ai/profiles/grok.py,sha256=nBOxOCYCK9aiLmz2Q-esqYhotNbbBC1boAoOYIk1tVw,211
 pydantic_ai/profiles/meta.py,sha256=IAGPoUrLWd-g9ajAgpWp9fIeOrP-7dBlZ2HEFjIhUbY,334
 pydantic_ai/profiles/mistral.py,sha256=ll01PmcK3szwlTfbaJLQmfd0TADN8lqjov9HpPJzCMQ,217
-pydantic_ai/profiles/openai.py,sha256=
+pydantic_ai/profiles/openai.py,sha256=DzrKYvegfCerqavHU3jHzrNQCm0IllWoIqS7_DiQB9M,6281
 pydantic_ai/profiles/qwen.py,sha256=u7pL8uomoQTVl45g5wDrHx0P_oFDLaN6ALswuwmkWc0,334
 pydantic_ai/providers/__init__.py,sha256=l3xrEhFzfCNyZZlJ5RJOi-BjVayZ-SwjNbs7pZ6UDhg,3549
 pydantic_ai/providers/anthropic.py,sha256=D35UXxCPXv8yIbD0fj9Zg2FvNyoMoJMeDUtVM8Sn78I,3046
@@ -70,8 +74,8 @@ pydantic_ai/providers/mistral.py,sha256=EIUSENjFuGzBhvbdrarUTM4VPkesIMnZrzfnEKHO
 pydantic_ai/providers/openai.py,sha256=7iGij0EaFylab7dTZAZDgXr78tr-HsZrn9EI9AkWBNQ,3091
 pydantic_ai/providers/openrouter.py,sha256=NXjNdnlXIBrBMMqbzcWQnowXOuZh4NHikXenBn5h3mc,4061
 pydantic_ai/providers/together.py,sha256=zFVSMSm5jXbpkNouvBOTjWrPmlPpCp6sQS5LMSyVjrQ,3482
-pydantic_ai_slim-0.3.
-pydantic_ai_slim-0.3.
-pydantic_ai_slim-0.3.
-pydantic_ai_slim-0.3.
-pydantic_ai_slim-0.3.
+pydantic_ai_slim-0.3.3.dist-info/METADATA,sha256=q1t0Sge7taA5n0_fN143GAiJfaFrw8wAiOAsHBHA5GE,3846
+pydantic_ai_slim-0.3.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pydantic_ai_slim-0.3.3.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
+pydantic_ai_slim-0.3.3.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
+pydantic_ai_slim-0.3.3.dist-info/RECORD,,
{pydantic_ai_slim-0.3.1.dist-info → pydantic_ai_slim-0.3.3.dist-info}/WHEEL
File without changes
{pydantic_ai_slim-0.3.1.dist-info → pydantic_ai_slim-0.3.3.dist-info}/entry_points.txt
File without changes
{pydantic_ai_slim-0.3.1.dist-info → pydantic_ai_slim-0.3.3.dist-info}/licenses/LICENSE
File without changes