pydantic-ai-slim 0.1.0__tar.gz → 0.1.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pydantic-ai-slim might be problematic. Click here for more details.
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/PKG-INFO +3 -3
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/_agent_graph.py +13 -14
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/agent.py +10 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/models/_json_schema.py +7 -3
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/models/gemini.py +17 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/tools.py +1 -1
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/.gitignore +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/README.md +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/__main__.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/_cli.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/_output.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/_parts_manager.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/_pydantic.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/_utils.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/format_as_xml.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/format_prompt.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/mcp.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/messages.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/models/__init__.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/models/anthropic.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/models/bedrock.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/models/cohere.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/models/fallback.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/models/function.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/models/groq.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/models/instrumented.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/models/mistral.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/models/openai.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/models/test.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/providers/__init__.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/providers/anthropic.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/providers/azure.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/providers/cohere.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/providers/groq.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/providers/mistral.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/providers/openai.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/result.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pydantic_ai/usage.py +0 -0
- {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.1}/pyproject.toml +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: pydantic-ai-slim
|
|
3
|
-
Version: 0.1.0
|
|
3
|
+
Version: 0.1.1
|
|
4
4
|
Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
|
|
5
5
|
Author-email: Samuel Colvin <samuel@pydantic.dev>
|
|
6
6
|
License-Expression: MIT
|
|
@@ -29,7 +29,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
|
|
|
29
29
|
Requires-Dist: griffe>=1.3.2
|
|
30
30
|
Requires-Dist: httpx>=0.27
|
|
31
31
|
Requires-Dist: opentelemetry-api>=1.28.0
|
|
32
|
-
Requires-Dist: pydantic-graph==0.1.0
|
|
32
|
+
Requires-Dist: pydantic-graph==0.1.1
|
|
33
33
|
Requires-Dist: pydantic>=2.10
|
|
34
34
|
Requires-Dist: typing-inspection>=0.4.0
|
|
35
35
|
Provides-Extra: anthropic
|
|
@@ -45,7 +45,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
|
|
|
45
45
|
Provides-Extra: duckduckgo
|
|
46
46
|
Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
|
|
47
47
|
Provides-Extra: evals
|
|
48
|
-
Requires-Dist: pydantic-evals==0.1.0; extra == 'evals'
|
|
48
|
+
Requires-Dist: pydantic-evals==0.1.1; extra == 'evals'
|
|
49
49
|
Provides-Extra: groq
|
|
50
50
|
Requires-Dist: groq>=0.15.0; extra == 'groq'
|
|
51
51
|
Provides-Extra: logfire
|
|
@@ -3,11 +3,11 @@ from __future__ import annotations as _annotations
|
|
|
3
3
|
import asyncio
|
|
4
4
|
import dataclasses
|
|
5
5
|
import json
|
|
6
|
-
from collections.abc import AsyncIterator, Iterator, Sequence
|
|
6
|
+
from collections.abc import AsyncIterator, Awaitable, Iterator, Sequence
|
|
7
7
|
from contextlib import asynccontextmanager, contextmanager
|
|
8
8
|
from contextvars import ContextVar
|
|
9
9
|
from dataclasses import field
|
|
10
|
-
from typing import TYPE_CHECKING, Any, Generic, Literal, Union, cast
|
|
10
|
+
from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, Union, cast
|
|
11
11
|
|
|
12
12
|
from opentelemetry.trace import Span, Tracer
|
|
13
13
|
from typing_extensions import TypeGuard, TypeVar, assert_never
|
|
@@ -87,6 +87,7 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]):
|
|
|
87
87
|
usage_limits: _usage.UsageLimits
|
|
88
88
|
max_result_retries: int
|
|
89
89
|
end_strategy: EndStrategy
|
|
90
|
+
get_instructions: Callable[[RunContext[DepsT]], Awaitable[str | None]]
|
|
90
91
|
|
|
91
92
|
output_schema: _output.OutputSchema[OutputDataT] | None
|
|
92
93
|
output_validators: list[_output.OutputValidator[DepsT, OutputDataT]]
|
|
@@ -141,7 +142,9 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
|
|
|
141
142
|
self, ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]]
|
|
142
143
|
) -> _messages.ModelRequest:
|
|
143
144
|
run_context = build_run_context(ctx)
|
|
144
|
-
history, next_message = await self._prepare_messages(
|
|
145
|
+
history, next_message = await self._prepare_messages(
|
|
146
|
+
self.user_prompt, ctx.state.message_history, ctx.deps.get_instructions, run_context
|
|
147
|
+
)
|
|
145
148
|
ctx.state.message_history = history
|
|
146
149
|
run_context.messages = history
|
|
147
150
|
|
|
@@ -155,6 +158,7 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
|
|
|
155
158
|
self,
|
|
156
159
|
user_prompt: str | Sequence[_messages.UserContent] | None,
|
|
157
160
|
message_history: list[_messages.ModelMessage] | None,
|
|
161
|
+
get_instructions: Callable[[RunContext[DepsT]], Awaitable[str | None]],
|
|
158
162
|
run_context: RunContext[DepsT],
|
|
159
163
|
) -> tuple[list[_messages.ModelMessage], _messages.ModelRequest]:
|
|
160
164
|
try:
|
|
@@ -169,7 +173,7 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
|
|
|
169
173
|
ctx_messages.used = True
|
|
170
174
|
|
|
171
175
|
parts: list[_messages.ModelRequestPart] = []
|
|
172
|
-
instructions = await self._instructions(run_context)
|
|
176
|
+
instructions = await get_instructions(run_context)
|
|
173
177
|
if message_history:
|
|
174
178
|
# Shallow copy messages
|
|
175
179
|
messages.extend(message_history)
|
|
@@ -210,15 +214,6 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
|
|
|
210
214
|
messages.append(_messages.SystemPromptPart(prompt))
|
|
211
215
|
return messages
|
|
212
216
|
|
|
213
|
-
async def _instructions(self, run_context: RunContext[DepsT]) -> str | None:
|
|
214
|
-
if self.instructions is None and not self.instructions_functions:
|
|
215
|
-
return None
|
|
216
|
-
|
|
217
|
-
instructions = self.instructions or ''
|
|
218
|
-
for instructions_runner in self.instructions_functions:
|
|
219
|
-
instructions += await instructions_runner.run(run_context)
|
|
220
|
-
return instructions
|
|
221
|
-
|
|
222
217
|
|
|
223
218
|
async def _prepare_request_parameters(
|
|
224
219
|
ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
|
|
@@ -479,7 +474,11 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
|
|
|
479
474
|
else:
|
|
480
475
|
if tool_responses:
|
|
481
476
|
parts.extend(tool_responses)
|
|
482
|
-
|
|
477
|
+
run_context = build_run_context(ctx)
|
|
478
|
+
instructions = await ctx.deps.get_instructions(run_context)
|
|
479
|
+
self._next_node = ModelRequestNode[DepsT, NodeRunEndT](
|
|
480
|
+
_messages.ModelRequest(parts=parts, instructions=instructions)
|
|
481
|
+
)
|
|
483
482
|
|
|
484
483
|
def _handle_final_result(
|
|
485
484
|
self,
|
|
@@ -620,6 +620,15 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
|
|
|
620
620
|
},
|
|
621
621
|
)
|
|
622
622
|
|
|
623
|
+
async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None:
|
|
624
|
+
if self._instructions is None and not self._instructions_functions:
|
|
625
|
+
return None
|
|
626
|
+
|
|
627
|
+
instructions = self._instructions or ''
|
|
628
|
+
for instructions_runner in self._instructions_functions:
|
|
629
|
+
instructions += await instructions_runner.run(run_context)
|
|
630
|
+
return instructions
|
|
631
|
+
|
|
623
632
|
graph_deps = _agent_graph.GraphAgentDeps[AgentDepsT, RunOutputDataT](
|
|
624
633
|
user_deps=deps,
|
|
625
634
|
prompt=user_prompt,
|
|
@@ -635,6 +644,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
|
|
|
635
644
|
mcp_servers=self._mcp_servers,
|
|
636
645
|
run_span=run_span,
|
|
637
646
|
tracer=tracer,
|
|
647
|
+
get_instructions=get_instructions,
|
|
638
648
|
)
|
|
639
649
|
start_node = _agent_graph.UserPromptNode[AgentDepsT](
|
|
640
650
|
user_prompt=user_prompt,
|
|
@@ -20,11 +20,11 @@ class WalkJsonSchema(ABC):
|
|
|
20
20
|
def __init__(
|
|
21
21
|
self, schema: JsonSchema, *, prefer_inlined_defs: bool = False, simplify_nullable_unions: bool = False
|
|
22
22
|
):
|
|
23
|
-
self.schema = deepcopy(schema)
|
|
23
|
+
self.schema = schema
|
|
24
24
|
self.prefer_inlined_defs = prefer_inlined_defs
|
|
25
25
|
self.simplify_nullable_unions = simplify_nullable_unions
|
|
26
26
|
|
|
27
|
-
self.defs: dict[str, JsonSchema] = self.schema.pop('$defs', {})
|
|
27
|
+
self.defs: dict[str, JsonSchema] = self.schema.get('$defs', {})
|
|
28
28
|
self.refs_stack = tuple[str, ...]()
|
|
29
29
|
self.recursive_refs = set[str]()
|
|
30
30
|
|
|
@@ -34,7 +34,11 @@ class WalkJsonSchema(ABC):
|
|
|
34
34
|
return schema
|
|
35
35
|
|
|
36
36
|
def walk(self) -> JsonSchema:
|
|
37
|
-
|
|
37
|
+
schema = deepcopy(self.schema)
|
|
38
|
+
|
|
39
|
+
# First, handle everything but $defs:
|
|
40
|
+
schema.pop('$defs', None)
|
|
41
|
+
handled = self._handle(schema)
|
|
38
42
|
|
|
39
43
|
if not self.prefer_inlined_defs and self.defs:
|
|
40
44
|
handled['$defs'] = {k: self._handle(v) for k, v in self.defs.items()}
|
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
from __future__ import annotations as _annotations
|
|
2
2
|
|
|
3
3
|
import base64
|
|
4
|
+
import warnings
|
|
4
5
|
from collections.abc import AsyncIterator, Sequence
|
|
5
6
|
from contextlib import asynccontextmanager
|
|
6
7
|
from dataclasses import dataclass, field, replace
|
|
@@ -776,6 +777,22 @@ class _GeminiJsonSchema(WalkJsonSchema):
|
|
|
776
777
|
super().__init__(schema, prefer_inlined_defs=True, simplify_nullable_unions=True)
|
|
777
778
|
|
|
778
779
|
def transform(self, schema: JsonSchema) -> JsonSchema:
|
|
780
|
+
# Note: we need to remove `additionalProperties: False` since it is currently mishandled by Gemini
|
|
781
|
+
additional_properties = schema.pop(
|
|
782
|
+
'additionalProperties', None
|
|
783
|
+
) # don't pop yet so it's included in the warning
|
|
784
|
+
if additional_properties: # pragma: no cover
|
|
785
|
+
original_schema = {**schema, 'additionalProperties': additional_properties}
|
|
786
|
+
warnings.warn(
|
|
787
|
+
'`additionalProperties` is not supported by Gemini; it will be removed from the tool JSON schema.'
|
|
788
|
+
f' Full schema: {self.schema}\n\n'
|
|
789
|
+
f'Source of additionalProperties within the full schema: {original_schema}\n\n'
|
|
790
|
+
'If this came from a field with a type like `dict[str, MyType]`, that field will always be empty.\n\n'
|
|
791
|
+
"If Google's APIs are updated to support this properly, please create an issue on the PydanticAI GitHub"
|
|
792
|
+
' and we will fix this behavior.',
|
|
793
|
+
UserWarning,
|
|
794
|
+
)
|
|
795
|
+
|
|
779
796
|
schema.pop('title', None)
|
|
780
797
|
schema.pop('default', None)
|
|
781
798
|
schema.pop('$schema', None)
|
|
@@ -333,7 +333,7 @@ class Tool(Generic[AgentDepsT]):
|
|
|
333
333
|
) -> _messages.ToolReturnPart | _messages.RetryPromptPart:
|
|
334
334
|
try:
|
|
335
335
|
if isinstance(message.args, str):
|
|
336
|
-
args_dict = self._validator.validate_json(message.args)
|
|
336
|
+
args_dict = self._validator.validate_json(message.args or '{}')
|
|
337
337
|
else:
|
|
338
338
|
args_dict = self._validator.validate_python(message.args)
|
|
339
339
|
except ValidationError as e:
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|