pydantic-ai-slim 0.3.7__py3-none-any.whl → 0.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pydantic-ai-slim has been flagged as possibly problematic.
- pydantic_ai/_a2a.py +152 -38
- pydantic_ai/_function_schema.py +1 -1
- pydantic_ai/_griffe.py +2 -2
- pydantic_ai/_utils.py +4 -1
- pydantic_ai/agent.py +2 -2
- pydantic_ai/mcp.py +1 -1
- pydantic_ai/messages.py +22 -6
- pydantic_ai/models/anthropic.py +1 -1
- pydantic_ai/models/bedrock.py +9 -6
- pydantic_ai/models/gemini.py +1 -1
- pydantic_ai/models/google.py +1 -1
- pydantic_ai/models/groq.py +1 -1
- pydantic_ai/models/mistral.py +3 -1
- pydantic_ai/models/openai.py +1 -1
- pydantic_ai/profiles/openai.py +9 -1
- pydantic_ai/tools.py +5 -5
- {pydantic_ai_slim-0.3.7.dist-info → pydantic_ai_slim-0.4.1.dist-info}/METADATA +4 -4
- {pydantic_ai_slim-0.3.7.dist-info → pydantic_ai_slim-0.4.1.dist-info}/RECORD +21 -21
- {pydantic_ai_slim-0.3.7.dist-info → pydantic_ai_slim-0.4.1.dist-info}/WHEEL +0 -0
- {pydantic_ai_slim-0.3.7.dist-info → pydantic_ai_slim-0.4.1.dist-info}/entry_points.txt +0 -0
- {pydantic_ai_slim-0.3.7.dist-info → pydantic_ai_slim-0.4.1.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/_a2a.py
CHANGED
@@ -1,11 +1,13 @@
 from __future__ import annotations as _annotations
 
+import uuid
 from collections.abc import AsyncIterator, Sequence
 from contextlib import asynccontextmanager
 from dataclasses import dataclass
 from functools import partial
-from typing import Any, Generic
+from typing import Any, Generic, TypeVar
 
+from pydantic import TypeAdapter
 from typing_extensions import assert_never
 
 from pydantic_ai.messages import (
@@ -19,12 +21,17 @@ from pydantic_ai.messages import (
     ModelResponse,
     ModelResponsePart,
     TextPart,
+    ThinkingPart,
+    ToolCallPart,
     UserPromptPart,
     VideoUrl,
 )
 
 from .agent import Agent, AgentDepsT, OutputDataT
 
+# AgentWorker output type needs to be invariant for use in both parameter and return positions
+WorkerOutputT = TypeVar('WorkerOutputT')
+
 try:
     from starlette.middleware import Middleware
     from starlette.routing import Route
@@ -33,10 +40,11 @@ try:
     from fasta2a.applications import FastA2A
     from fasta2a.broker import Broker, InMemoryBroker
     from fasta2a.schema import (
+        AgentProvider,
         Artifact,
+        DataPart,
         Message,
         Part,
-        Provider,
         Skill,
         TaskIdParams,
         TaskSendParams,
@@ -72,7 +80,7 @@ def agent_to_a2a(
     url: str = 'http://localhost:8000',
     version: str = '1.0.0',
     description: str | None = None,
-    provider:
+    provider: AgentProvider | None = None,
     skills: list[Skill] | None = None,
     # Starlette
     debug: bool = False,
@@ -106,59 +114,121 @@ def agent_to_a2a(
 
 
 @dataclass
-class AgentWorker(Worker, Generic[AgentDepsT, OutputDataT]):
+class AgentWorker(Worker[list[ModelMessage]], Generic[WorkerOutputT, AgentDepsT]):
     """A worker that uses an agent to execute tasks."""
 
-    agent: Agent[AgentDepsT,
+    agent: Agent[AgentDepsT, WorkerOutputT]
 
     async def run_task(self, params: TaskSendParams) -> None:
-        task = await self.storage.load_task(params['id']
-
-
+        task = await self.storage.load_task(params['id'])
+        if task is None:
+            raise ValueError(f'Task {params["id"]} not found')  # pragma: no cover
+
+        # TODO(Marcelo): Should we lock `run_task` on the `context_id`?
+        # Ensure this task hasn't been run before
+        if task['status']['state'] != 'submitted':
+            raise ValueError(  # pragma: no cover
+                f'Task {params["id"]} has already been processed (state: {task["status"]["state"]})'
+            )
 
         await self.storage.update_task(task['id'], state='working')
 
-        #
-
+        # Load context - contains pydantic-ai message history from previous tasks in this conversation
+        message_history = await self.storage.load_context(task['context_id']) or []
+        message_history.extend(self.build_message_history(task.get('history', [])))
+
+        try:
+            result = await self.agent.run(message_history=message_history)  # type: ignore
 
-
-        message_history = self.build_message_history(task_history=task_history)
+            await self.storage.update_context(task['context_id'], result.all_messages())
 
-
-
+            # Convert new messages to A2A format for task history
+            a2a_messages: list[Message] = []
+
+            for message in result.new_messages():
+                if isinstance(message, ModelRequest):
+                    # Skip user prompts - they're already in task history
+                    continue
+                else:
+                    # Convert response parts to A2A format
+                    a2a_parts = self._response_parts_to_a2a(message.parts)
+                    if a2a_parts:  # Add if there are visible parts (text/thinking)
+                        a2a_messages.append(
+                            Message(role='agent', parts=a2a_parts, kind='message', message_id=str(uuid.uuid4()))
+                        )
 
-
-
+            artifacts = self.build_artifacts(result.output)
+        except Exception:
+            await self.storage.update_task(task['id'], state='failed')
+            raise
+        else:
+            await self.storage.update_task(
+                task['id'], state='completed', new_artifacts=artifacts, new_messages=a2a_messages
+            )
 
     async def cancel_task(self, params: TaskIdParams) -> None:
         pass
 
-    def build_artifacts(self, result:
-
-        return [Artifact(name='result', index=0, parts=[A2ATextPart(type='text', text=str(result))])]
+    def build_artifacts(self, result: WorkerOutputT) -> list[Artifact]:
+        """Build artifacts from agent result.
 
-
+        All agent outputs become artifacts to mark them as durable task outputs.
+        For string results, we use TextPart. For structured data, we use DataPart.
+        Metadata is included to preserve type information.
+        """
+        artifact_id = str(uuid.uuid4())
+        part = self._convert_result_to_part(result)
+        return [Artifact(artifact_id=artifact_id, name='result', parts=[part])]
+
+    def _convert_result_to_part(self, result: WorkerOutputT) -> Part:
+        """Convert agent result to a Part (TextPart or DataPart).
+
+        For string results, returns a TextPart.
+        For structured data, returns a DataPart with properly serialized data.
+        """
+        if isinstance(result, str):
+            return A2ATextPart(kind='text', text=result)
+        else:
+            output_type = type(result)
+            type_adapter = TypeAdapter(output_type)
+            data = type_adapter.dump_python(result, mode='json')
+            json_schema = type_adapter.json_schema(mode='serialization')
+            return DataPart(kind='data', data={'result': data}, metadata={'json_schema': json_schema})
+
+    def build_message_history(self, history: list[Message]) -> list[ModelMessage]:
         model_messages: list[ModelMessage] = []
-        for message in
+        for message in history:
             if message['role'] == 'user':
-                model_messages.append(ModelRequest(parts=self.
+                model_messages.append(ModelRequest(parts=self._request_parts_from_a2a(message['parts'])))
             else:
-                model_messages.append(ModelResponse(parts=self.
+                model_messages.append(ModelResponse(parts=self._response_parts_from_a2a(message['parts'])))
         return model_messages
 
-    def
+    def _request_parts_from_a2a(self, parts: list[Part]) -> list[ModelRequestPart]:
+        """Convert A2A Part objects to pydantic-ai ModelRequestPart objects.
+
+        This handles the conversion from A2A protocol parts (text, file, data) to
+        pydantic-ai's internal request parts (UserPromptPart with various content types).
+
+        Args:
+            parts: List of A2A Part objects from incoming messages
+
+        Returns:
+            List of ModelRequestPart objects for the pydantic-ai agent
+        """
         model_parts: list[ModelRequestPart] = []
         for part in parts:
-            if part['
+            if part['kind'] == 'text':
                 model_parts.append(UserPromptPart(content=part['text']))
-            elif part['
-
-                if '
-                    data =
-
+            elif part['kind'] == 'file':
+                file_content = part['file']
+                if 'bytes' in file_content:
+                    data = file_content['bytes'].encode('utf-8')
+                    mime_type = file_content.get('mime_type', 'application/octet-stream')
+                    content = BinaryContent(data=data, media_type=mime_type)
                     model_parts.append(UserPromptPart(content=[content]))
                 else:
-                    url =
+                    url = file_content['uri']
                     for url_cls in (DocumentUrl, AudioUrl, ImageUrl, VideoUrl):
                         content = url_cls(url=url)
                         try:
@@ -168,24 +238,68 @@ class AgentWorker(Worker, Generic[AgentDepsT, OutputDataT]):
                         else:
                             break
                     else:
-                        raise ValueError(f'
+                        raise ValueError(f'Unsupported file type: {url}')  # pragma: no cover
                 model_parts.append(UserPromptPart(content=[content]))
-            elif part['
-                # TODO(Marcelo): Maybe we should use this for `ToolReturnPart`, and `RetryPromptPart`.
+            elif part['kind'] == 'data':
                 raise NotImplementedError('Data parts are not supported yet.')
             else:
                 assert_never(part)
         return model_parts
 
-    def
+    def _response_parts_from_a2a(self, parts: list[Part]) -> list[ModelResponsePart]:
+        """Convert A2A Part objects to pydantic-ai ModelResponsePart objects.
+
+        This handles the conversion from A2A protocol parts (text, file, data) to
+        pydantic-ai's internal response parts. Currently only supports text parts
+        as agent responses in A2A are expected to be text-based.
+
+        Args:
+            parts: List of A2A Part objects from stored agent messages
+
+        Returns:
+            List of ModelResponsePart objects for message history
+        """
         model_parts: list[ModelResponsePart] = []
         for part in parts:
-            if part['
+            if part['kind'] == 'text':
                 model_parts.append(TextPart(content=part['text']))
-            elif part['
+            elif part['kind'] == 'file':  # pragma: no cover
                 raise NotImplementedError('File parts are not supported yet.')
-            elif part['
+            elif part['kind'] == 'data':  # pragma: no cover
                 raise NotImplementedError('Data parts are not supported yet.')
             else:  # pragma: no cover
                 assert_never(part)
         return model_parts
+
+    def _response_parts_to_a2a(self, parts: list[ModelResponsePart]) -> list[Part]:
+        """Convert pydantic-ai ModelResponsePart objects to A2A Part objects.
+
+        This handles the conversion from pydantic-ai's internal response parts to
+        A2A protocol parts. Different part types are handled as follows:
+        - TextPart: Converted directly to A2A TextPart
+        - ThinkingPart: Converted to TextPart with metadata indicating it's thinking
+        - ToolCallPart: Skipped (internal to agent execution)
+
+        Args:
+            parts: List of ModelResponsePart objects from agent response
+
+        Returns:
+            List of A2A Part objects suitable for sending via A2A protocol
+        """
+        a2a_parts: list[Part] = []
+        for part in parts:
+            if isinstance(part, TextPart):
+                a2a_parts.append(A2ATextPart(kind='text', text=part.content))
+            elif isinstance(part, ThinkingPart):
+                # Convert thinking to text with metadata
+                a2a_parts.append(
+                    A2ATextPart(
+                        kind='text',
+                        text=part.content,
+                        metadata={'type': 'thinking', 'thinking_id': part.id, 'signature': part.signature},
+                    )
+                )
+            elif isinstance(part, ToolCallPart):
+                # Skip tool calls - they're internal to agent execution
+                pass
+        return a2a_parts

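For readers following the new `_convert_result_to_part` logic above, the sketch below shows in isolation the pydantic `TypeAdapter` round-trip it relies on. The `Order` dataclass is a made-up stand-in for a structured agent output, and the plain dict only mirrors the shape of fasta2a's `DataPart`; it is not the real class.

from dataclasses import dataclass

from pydantic import TypeAdapter


@dataclass
class Order:  # hypothetical structured agent output
    item: str
    quantity: int


result = Order(item='book', quantity=2)
ta = TypeAdapter(type(result))
# dump_python(mode='json') yields JSON-safe primitives, the same call the new code makes
data = ta.dump_python(result, mode='json')
schema = ta.json_schema(mode='serialization')
part = {'kind': 'data', 'data': {'result': data}, 'metadata': {'json_schema': schema}}
print(part['data'])  # {'result': {'item': 'book', 'quantity': 2}}

Carrying the serialization schema in the part metadata preserves enough type information for a client on the other side of the A2A protocol to interpret the structured result.
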
pydantic_ai/_function_schema.py
CHANGED
@@ -35,7 +35,7 @@ class FunctionSchema:
     """Internal information about a function schema."""
 
     function: Callable[..., Any]
-    description: str
+    description: str | None
     validator: SchemaValidator
     json_schema: ObjectJsonSchema
     # if not None, the function takes a single by that name (besides potentially `info`)

pydantic_ai/_griffe.py
CHANGED
@@ -19,7 +19,7 @@ def doc_descriptions(
     sig: Signature,
     *,
     docstring_format: DocstringFormat,
-) -> tuple[str, dict[str, str]]:
+) -> tuple[str | None, dict[str, str]]:
     """Extract the function description and parameter descriptions from a function's docstring.
 
     The function parses the docstring using the specified format (or infers it if 'auto')
@@ -35,7 +35,7 @@ def doc_descriptions(
     """
     doc = func.__doc__
     if doc is None:
-        return
+        return None, {}
 
     # see https://github.com/mkdocstrings/griffe/issues/293
     parent = cast(GriffeObject, sig)

pydantic_ai/_utils.py
CHANGED
@@ -315,8 +315,11 @@ def dataclasses_no_defaults_repr(self: Any) -> str:
     return f'{self.__class__.__qualname__}({", ".join(kv_pairs)})'
 
 
+_datetime_ta = TypeAdapter(datetime)
+
+
 def number_to_datetime(x: int | float) -> datetime:
-    return
+    return _datetime_ta.validate_python(x)
 
 
 AwaitableCallable = Callable[..., Awaitable[T]]
pydantic_ai/agent.py
CHANGED
@@ -63,7 +63,7 @@ if TYPE_CHECKING:
 
     from fasta2a.applications import FastA2A
     from fasta2a.broker import Broker
-    from fasta2a.schema import
+    from fasta2a.schema import AgentProvider, Skill
     from fasta2a.storage import Storage
     from pydantic_ai.mcp import MCPServer
 
@@ -1764,7 +1764,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         url: str = 'http://localhost:8000',
         version: str = '1.0.0',
         description: str | None = None,
-        provider:
+        provider: AgentProvider | None = None,
         skills: list[Skill] | None = None,
         # Starlette
         debug: bool = False,

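Both `agent.py` and `_a2a.py` now type the `provider` argument as `fasta2a.schema.AgentProvider` instead of `Provider`. A hedged sketch of how a caller might pass it to `Agent.to_a2a()`; the `AgentProvider` field names (`organization`, `url`), the model string, and the example values are assumptions, not taken from this diff:

from fasta2a.schema import AgentProvider

from pydantic_ai import Agent

agent = Agent('openai:gpt-4o', instructions='Be concise.')

# `provider` is now typed as AgentProvider (renamed from Provider in fasta2a's schema)
app = agent.to_a2a(
    version='1.0.0',
    description='A concise general-purpose agent',
    provider=AgentProvider(organization='Example Org', url='https://example.com'),  # assumed field names
)
# `app` is an ASGI application, e.g. served with `uvicorn module:app`
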
pydantic_ai/mcp.py
CHANGED
@@ -98,7 +98,7 @@ class MCPServer(ABC):
         return [
             tools.ToolDefinition(
                 name=self.get_prefixed_tool_name(tool.name),
-                description=tool.description
+                description=tool.description,
                 parameters_json_schema=tool.inputSchema,
             )
             for tool in mcp_tools.tools

pydantic_ai/messages.py
CHANGED
@@ -25,7 +25,7 @@ if TYPE_CHECKING:
     from .models.instrumented import InstrumentationSettings
 
 
-AudioMediaType: TypeAlias = Literal['audio/wav', 'audio/mpeg']
+AudioMediaType: TypeAlias = Literal['audio/wav', 'audio/mpeg', 'audio/ogg', 'audio/flac', 'audio/aiff', 'audio/aac']
 ImageMediaType: TypeAlias = Literal['image/jpeg', 'image/png', 'image/gif', 'image/webp']
 DocumentMediaType: TypeAlias = Literal[
     'application/pdf',
@@ -48,7 +48,7 @@ VideoMediaType: TypeAlias = Literal[
     'video/3gpp',
 ]
 
-AudioFormat: TypeAlias = Literal['wav', 'mp3']
+AudioFormat: TypeAlias = Literal['wav', 'mp3', 'oga', 'flac', 'aiff', 'aac']
 ImageFormat: TypeAlias = Literal['jpeg', 'png', 'gif', 'webp']
 DocumentFormat: TypeAlias = Literal['csv', 'doc', 'docx', 'html', 'md', 'pdf', 'txt', 'xls', 'xlsx']
 VideoFormat: TypeAlias = Literal['mkv', 'mov', 'mp4', 'webm', 'flv', 'mpeg', 'mpg', 'wmv', 'three_gp']
@@ -182,13 +182,25 @@ class AudioUrl(FileUrl):
 
     @property
     def media_type(self) -> AudioMediaType:
-        """Return the media type of the audio file, based on the url.
+        """Return the media type of the audio file, based on the url.
+
+        References:
+        - Gemini: https://ai.google.dev/gemini-api/docs/audio#supported-formats
+        """
         if self.url.endswith('.mp3'):
             return 'audio/mpeg'
-
+        if self.url.endswith('.wav'):
             return 'audio/wav'
-
-
+        if self.url.endswith('.flac'):
+            return 'audio/flac'
+        if self.url.endswith('.oga'):
+            return 'audio/ogg'
+        if self.url.endswith('.aiff'):
+            return 'audio/aiff'
+        if self.url.endswith('.aac'):
+            return 'audio/aac'
+
+        raise ValueError(f'Unknown audio file extension: {self.url}')
 
     @property
     def format(self) -> AudioFormat:
@@ -358,6 +370,10 @@ _document_format_lookup: dict[str, DocumentFormat] = {
 _audio_format_lookup: dict[str, AudioFormat] = {
     'audio/mpeg': 'mp3',
     'audio/wav': 'wav',
+    'audio/flac': 'flac',
+    'audio/ogg': 'oga',
+    'audio/aiff': 'aiff',
+    'audio/aac': 'aac',
 }
 _image_format_lookup: dict[str, ImageFormat] = {
     'image/jpeg': 'jpeg',
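Given the widened `AudioMediaType`/`AudioFormat` literals and the new branches in `AudioUrl.media_type`, a FLAC URL now resolves as below (the URL itself is made up):

from pydantic_ai.messages import AudioUrl

audio = AudioUrl(url='https://example.com/briefing.flac')
print(audio.media_type)  # 'audio/flac', via the new endswith('.flac') branch
print(audio.format)      # 'flac', via the extended _audio_format_lookup table
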
pydantic_ai/models/anthropic.py
CHANGED
@@ -416,7 +416,7 @@ class AnthropicModel(Model):
     def _map_tool_definition(f: ToolDefinition) -> BetaToolParam:
         return {
             'name': f.name,
-            'description': f.description,
+            'description': f.description or '',
             'input_schema': f.parameters_json_schema,
         }
 

pydantic_ai/models/bedrock.py
CHANGED
@@ -62,6 +62,7 @@ if TYPE_CHECKING:
         SystemContentBlockTypeDef,
         ToolChoiceTypeDef,
         ToolConfigurationTypeDef,
+        ToolSpecificationTypeDef,
         ToolTypeDef,
         VideoBlockTypeDef,
     )
@@ -228,14 +229,16 @@ class BedrockConverseModel(Model):
 
     @staticmethod
     def _map_tool_definition(f: ToolDefinition) -> ToolTypeDef:
-
-            '
-
-                'description': f.description,
-                'inputSchema': {'json': f.parameters_json_schema},
-            }
+        tool_spec: ToolSpecificationTypeDef = {
+            'name': f.name,
+            'inputSchema': {'json': f.parameters_json_schema},
         }
 
+        if f.description:  # pragma: no branch
+            tool_spec['description'] = f.description
+
+        return {'toolSpec': tool_spec}
+
     @property
     def base_url(self) -> str:
         return str(self.client.meta.endpoint_url)
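A standalone sketch of the reworked Bedrock tool mapping: now that `ToolDefinition.description` may be `None`, the `description` key is only attached when a description actually exists. Plain dicts stand in here for the boto3 `ToolTypeDef`/`ToolSpecificationTypeDef` TypedDicts.

from typing import Any


def map_tool_definition(name: str, parameters_json_schema: dict[str, Any], description: str | None) -> dict[str, Any]:
    # Mirrors the shape of BedrockConverseModel._map_tool_definition after this change
    tool_spec: dict[str, Any] = {
        'name': name,
        'inputSchema': {'json': parameters_json_schema},
    }
    if description:
        # only include the key when there is a real description to send
        tool_spec['description'] = description
    return {'toolSpec': tool_spec}


print(map_tool_definition('get_time', {'type': 'object', 'properties': {}}, None))
# {'toolSpec': {'name': 'get_time', 'inputSchema': {'json': {'type': 'object', 'properties': {}}}}}
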
pydantic_ai/models/gemini.py
CHANGED
@@ -773,7 +773,7 @@ class _GeminiFunction(TypedDict):
 
 def _function_from_abstract_tool(tool: ToolDefinition) -> _GeminiFunction:
     json_schema = tool.parameters_json_schema
-    f = _GeminiFunction(name=tool.name, description=tool.description, parameters=json_schema)
+    f = _GeminiFunction(name=tool.name, description=tool.description or '', parameters=json_schema)
     return f
 
 

pydantic_ai/models/google.py
CHANGED
@@ -534,7 +534,7 @@ def _function_declaration_from_tool(tool: ToolDefinition) -> FunctionDeclaration
     json_schema = tool.parameters_json_schema
     f = FunctionDeclarationDict(
         name=tool.name,
-        description=tool.description,
+        description=tool.description or '',
         parameters=json_schema,  # type: ignore
     )
     return f

pydantic_ai/models/groq.py
CHANGED
pydantic_ai/models/mistral.py
CHANGED
@@ -306,7 +306,9 @@ class MistralModel(Model):
         )
         tools = [
             MistralTool(
-                function=MistralFunction(
+                function=MistralFunction(
+                    name=r.name, parameters=r.parameters_json_schema, description=r.description or ''
+                )
             )
             for r in all_tools
         ]

pydantic_ai/models/openai.py
CHANGED
pydantic_ai/profiles/openai.py
CHANGED
@@ -93,10 +93,18 @@ class OpenAIJsonSchemaTransformer(JsonSchemaTransformer):
     def transform(self, schema: JsonSchema) -> JsonSchema:  # noqa C901
         # Remove unnecessary keys
         schema.pop('title', None)
-        schema.pop('default', None)
         schema.pop('$schema', None)
         schema.pop('discriminator', None)
 
+        default = schema.get('default', _sentinel)
+        if default is not _sentinel:
+            # the "default" keyword is not allowed in strict mode, but including it makes some Ollama models behave
+            # better, so we keep it around when not strict
+            if self.strict is True:
+                schema.pop('default', None)
+            elif self.strict is None:  # pragma: no branch
+                self.is_strict_compatible = False
+
         if schema_ref := schema.get('$ref'):
             if schema_ref == self.root_ref:
                 schema['$ref'] = '#'
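A minimal sketch of the new `default` handling in `OpenAIJsonSchemaTransformer.transform`, pulled out as a standalone function so the branches are easy to see. `_sentinel` here is a local stand-in for the module's sentinel, and the real method mutates `self.is_strict_compatible` rather than returning it.

from typing import Any

_sentinel = object()  # local stand-in for the module-level sentinel


def handle_default(schema: dict[str, Any], strict: bool | None, is_strict_compatible: bool = True) -> bool:
    """Mirror of the new 'default' handling; returns the updated strict-compatibility flag."""
    if schema.get('default', _sentinel) is not _sentinel:
        if strict is True:
            # 'default' is not allowed in OpenAI strict mode, so drop it
            schema.pop('default', None)
        elif strict is None:
            # keep 'default' (it helps some Ollama models), but the schema can no longer be sent as strict
            is_strict_compatible = False
    return is_strict_compatible


schema = {'type': 'integer', 'default': 3}
print(handle_default(schema, strict=None), schema)  # False {'type': 'integer', 'default': 3}
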
pydantic_ai/tools.py
CHANGED
@@ -161,7 +161,7 @@ class Tool(Generic[AgentDepsT]):
     takes_ctx: bool
     max_retries: int | None
     name: str
-    description: str
+    description: str | None
     prepare: ToolPrepareFunc[AgentDepsT] | None
     docstring_format: DocstringFormat
     require_parameter_descriptions: bool
@@ -269,7 +269,7 @@ class Tool(Generic[AgentDepsT]):
         cls,
         function: Callable[..., Any],
         name: str,
-        description: str,
+        description: str | None,
         json_schema: JsonSchemaValue,
     ) -> Self:
         """Creates a Pydantic tool from a function and a JSON schema.
@@ -440,12 +440,12 @@ class ToolDefinition:
     name: str
     """The name of the tool."""
 
-    description: str
-    """The description of the tool."""
-
     parameters_json_schema: ObjectJsonSchema
     """The JSON schema for the tool's parameters."""
 
+    description: str | None = None
+    """The description of the tool."""
+
     outer_typed_dict_key: str | None = None
     """The key in the outer [TypedDict] that wraps an output tool.
 
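With `ToolDefinition.description` now optional (and moved after `parameters_json_schema` so it can default to `None`), a definition can be constructed without a description at all; a small sketch using keyword arguments:

from pydantic_ai.tools import ToolDefinition

tool_def = ToolDefinition(
    name='roll_die',
    parameters_json_schema={'type': 'object', 'properties': {}, 'required': []},
    # description is now `str | None = None`, so it can simply be omitted
)
print(tool_def.description)  # None
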
{pydantic_ai_slim-0.3.7.dist-info → pydantic_ai_slim-0.4.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.3.7
+Version: 0.4.1
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
 License-Expression: MIT
@@ -30,11 +30,11 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.3.7
+Requires-Dist: pydantic-graph==0.4.1
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
-Requires-Dist: fasta2a==0.3.7; extra == 'a2a'
+Requires-Dist: fasta2a==0.4.1; extra == 'a2a'
 Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.52.0; extra == 'anthropic'
 Provides-Extra: bedrock
@@ -48,7 +48,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.3.7; extra == 'evals'
+Requires-Dist: pydantic-evals==0.4.1; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.24.0; extra == 'google'
 Provides-Extra: groq

{pydantic_ai_slim-0.3.7.dist-info → pydantic_ai_slim-0.4.1.dist-info}/RECORD
CHANGED
@@ -1,29 +1,29 @@
 pydantic_ai/__init__.py,sha256=Ns04g4Efqkzwccs8w2nGphfWbptMlIJYG8vIJbGGyG0,1262
 pydantic_ai/__main__.py,sha256=Q_zJU15DUA01YtlJ2mnaLCoId2YmgmreVEERGuQT-Y0,132
-pydantic_ai/_a2a.py,sha256=
+pydantic_ai/_a2a.py,sha256=G6W8zLRE5FNug19GieVkYuGPw5CA44YeZnS7GTN7M30,12068
 pydantic_ai/_agent_graph.py,sha256=rtzyBXN4bzEDBeRkRwF031ORktSMbuGz9toZmSqUxNI,42153
 pydantic_ai/_cli.py,sha256=R-sE-9gYqPxV5-5utso4g-bzAKMiTCdo33XOVqE0ZEg,13206
-pydantic_ai/_function_schema.py,sha256=
-pydantic_ai/_griffe.py,sha256=
+pydantic_ai/_function_schema.py,sha256=BZus5y51eqiGQKxQIcCiDoSPml3AtAb12-st_aujU2k,10813
+pydantic_ai/_griffe.py,sha256=Ugft16ZHw9CN_6-lW0Svn6jESK9zHXO_x4utkGBkbBI,5253
 pydantic_ai/_mcp.py,sha256=PuvwnlLjv7YYOa9AZJCrklevBug99zGMhwJCBGG7BHQ,5626
 pydantic_ai/_output.py,sha256=8qOx2hEwxpcoS5P8OLqOAWj94KfODDVqrPHnEIhI-90,33164
 pydantic_ai/_parts_manager.py,sha256=Lioi8b7Nfyax09yQu8jTkMzxd26dYDrdAqhYvjRSKqQ,16182
 pydantic_ai/_run_context.py,sha256=zNkSyiQSH-YweO39ii3iB2taouUOodo3sTjz2Lrj4Pc,1792
 pydantic_ai/_system_prompt.py,sha256=lUSq-gDZjlYTGtd6BUm54yEvTIvgdwBmJ8mLsNZZtYU,1142
 pydantic_ai/_thinking_part.py,sha256=mzx2RZSfiQxAKpljEflrcXRXmFKxtp6bKVyorY3UYZk,1554
-pydantic_ai/_utils.py,sha256=
-pydantic_ai/agent.py,sha256=
+pydantic_ai/_utils.py,sha256=SGXEiGCnMae1Iz_eZKUs6ni_tGMPkDaJ4W3W3YMoP5w,15545
+pydantic_ai/agent.py,sha256=zvQgEG9eFG7entCTum3QSApHbNU8RvAE_ydscPaMAC4,96196
 pydantic_ai/direct.py,sha256=WRfgke3zm-eeR39LTuh9XI2TrdHXAqO81eDvFwih4Ko,14803
 pydantic_ai/exceptions.py,sha256=IdFw594Ou7Vn4YFa7xdZ040_j_6nmyA3MPANbC7sys4,3175
 pydantic_ai/format_as_xml.py,sha256=IINfh1evWDphGahqHNLBArB5dQ4NIqS3S-kru35ztGg,372
 pydantic_ai/format_prompt.py,sha256=qdKep95Sjlr7u1-qag4JwPbjoURbG0GbeU_l5ODTNw4,4466
-pydantic_ai/mcp.py,sha256=
-pydantic_ai/messages.py,sha256=
+pydantic_ai/mcp.py,sha256=6RvxXIn6bUlL2XWpX69i8G3atU-HLLZBgKc93dYqeVo,21830
+pydantic_ai/messages.py,sha256=ykB4jzDwPGFkgQSJagOdurBv5-DTtCaY-y9671FYz7E,39256
 pydantic_ai/output.py,sha256=gq-8H2YKgbKSTxp_HUMym57ZUkwupHyS4sCOzedlXTI,9315
 pydantic_ai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/result.py,sha256=GVzXf7yjR2lKBDw9k-8PlhJgCpE3dVHiyLL0dFPvs7I,25603
 pydantic_ai/settings.py,sha256=yuUZ7-GkdPB-Gbx71kSdh8dSr6gwM9gEwk84qNxPO_I,3552
-pydantic_ai/tools.py,sha256=
+pydantic_ai/tools.py,sha256=ZZ5DZMzSLMZkM9y_G3fx5YnVTki6daPYgRkfuNXAQ-M,17774
 pydantic_ai/usage.py,sha256=35YPmItlzfNOwP35Rhh0qBUOlg5On5rUE7xqHQWrpaU,5596
 pydantic_ai/common_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/common_tools/duckduckgo.py,sha256=Ty9tu1rCwMfGKgz1JAaC2q_4esmL6QvpkHQUN8F0Ecc,2152
@@ -32,18 +32,18 @@ pydantic_ai/ext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/ext/aci.py,sha256=eiuWamUh90kexWyuGw_Fw2kM-EAA6Pv-IfNhf5hQ8fs,2123
 pydantic_ai/ext/langchain.py,sha256=iSyACZiJDDvxr0BKYl9dLxe4BPezCBHxgz_2Vk3W-Ak,1973
 pydantic_ai/models/__init__.py,sha256=B8vG0crUDCO3Bvd8fVeMNPzZH2Un61rEJFxSaumoUl4,29101
-pydantic_ai/models/anthropic.py,sha256=
-pydantic_ai/models/bedrock.py,sha256=
+pydantic_ai/models/anthropic.py,sha256=ooRh6Yh0jLj78IKjgaYTN0UbB2Ku8ZhuEBi8v8kymoE,23679
+pydantic_ai/models/bedrock.py,sha256=i8BNOFEYGiRYA4ZEFwqHzJHf3EP54akVzZHdEUJohiw,29234
 pydantic_ai/models/cohere.py,sha256=qgYegjfOsqXbRcjXCbg0jaexbuxh1SrS9_mZdzzJVbM,12623
 pydantic_ai/models/fallback.py,sha256=sTYw8wW8iGgFIPG2Oynsucb9orG6wbV_h-9k5vKil4I,5103
 pydantic_ai/models/function.py,sha256=nfCjRmbcF7sdK_nsak1fvzz9Xkptx5WhsxvWdB02zec,12113
-pydantic_ai/models/gemini.py,sha256=
-pydantic_ai/models/google.py,sha256=
-pydantic_ai/models/groq.py,sha256=
+pydantic_ai/models/gemini.py,sha256=22qucwayi8x20yvZY6qeHH4WRyEObfIkrCQ5cluejdQ,38488
+pydantic_ai/models/google.py,sha256=PFioCPeuf5_f80s9NiRSxFZawvfYbUUhpaW7mUg8frg,24072
+pydantic_ai/models/groq.py,sha256=tmYTPKsMMhtIms_9muPKYQvGZ98b_kax7t8H1YE1vPU,18500
 pydantic_ai/models/instrumented.py,sha256=olTa7Fl2BwHLvTLT6sSrS2HOS7UyWg182Xujx8hutBw,15947
 pydantic_ai/models/mcp_sampling.py,sha256=q9nnjNEAAbhrfRc_Qw5z9TtCHMG_SwlCWW9FvKWjh8k,3395
-pydantic_ai/models/mistral.py,sha256=
-pydantic_ai/models/openai.py,sha256=
+pydantic_ai/models/mistral.py,sha256=d_TQjSQukSztNt6JpFQCqugYTxXQ97GaQBc3zUxOSSA,30555
+pydantic_ai/models/openai.py,sha256=ReqpM4gdM0TPSwUCGu2L8VoBFsxy2Y-8PRFhI6d5KcI,53646
 pydantic_ai/models/test.py,sha256=STNd79ZoCyyphm0eFRNDoTpvkOzhw1qFw1zgv44kqsg,17441
 pydantic_ai/models/wrapper.py,sha256=2g06TxE5kFqfaJCwsDJHp7Rltoj0XXH0OzdpRDOcqNo,1861
 pydantic_ai/profiles/__init__.py,sha256=BXMqUpgRfosmYgcxjKAI9ESCj47JTSa30DhKXEgVLzM,2419
@@ -56,7 +56,7 @@ pydantic_ai/profiles/google.py,sha256=DJ0otpkCgVIrjwV2lzAUAejw8ivwZT9pNAY_sGRcrV
 pydantic_ai/profiles/grok.py,sha256=nBOxOCYCK9aiLmz2Q-esqYhotNbbBC1boAoOYIk1tVw,211
 pydantic_ai/profiles/meta.py,sha256=IAGPoUrLWd-g9ajAgpWp9fIeOrP-7dBlZ2HEFjIhUbY,334
 pydantic_ai/profiles/mistral.py,sha256=ll01PmcK3szwlTfbaJLQmfd0TADN8lqjov9HpPJzCMQ,217
-pydantic_ai/profiles/openai.py,sha256=
+pydantic_ai/profiles/openai.py,sha256=wFFtzbM22HbxxRNDXYEs6tr6_RSbv8xN_xBPz6RsP9s,6698
 pydantic_ai/profiles/qwen.py,sha256=u7pL8uomoQTVl45g5wDrHx0P_oFDLaN6ALswuwmkWc0,334
 pydantic_ai/providers/__init__.py,sha256=JNsVZ1PBx_9hUJZbnoRIDJCkWbrJbk69w-SFqjoG-6c,3654
 pydantic_ai/providers/anthropic.py,sha256=D35UXxCPXv8yIbD0fj9Zg2FvNyoMoJMeDUtVM8Sn78I,3046
@@ -76,8 +76,8 @@ pydantic_ai/providers/mistral.py,sha256=EIUSENjFuGzBhvbdrarUTM4VPkesIMnZrzfnEKHO
 pydantic_ai/providers/openai.py,sha256=7iGij0EaFylab7dTZAZDgXr78tr-HsZrn9EI9AkWBNQ,3091
 pydantic_ai/providers/openrouter.py,sha256=NXjNdnlXIBrBMMqbzcWQnowXOuZh4NHikXenBn5h3mc,4061
 pydantic_ai/providers/together.py,sha256=zFVSMSm5jXbpkNouvBOTjWrPmlPpCp6sQS5LMSyVjrQ,3482
-pydantic_ai_slim-0.3.7.dist-info/METADATA,sha256=
-pydantic_ai_slim-0.3.7.dist-info/WHEEL,sha256=
-pydantic_ai_slim-0.3.7.dist-info/entry_points.txt,sha256=
-pydantic_ai_slim-0.3.7.dist-info/licenses/LICENSE,sha256=
-pydantic_ai_slim-0.3.7.dist-info/RECORD,,
+pydantic_ai_slim-0.4.1.dist-info/METADATA,sha256=PqGrAd6qbv0rxMgiCa6N1Lo1mBvMb3XUC_FGaxuzeAY,3846
+pydantic_ai_slim-0.4.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pydantic_ai_slim-0.4.1.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
+pydantic_ai_slim-0.4.1.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
+pydantic_ai_slim-0.4.1.dist-info/RECORD,,

{pydantic_ai_slim-0.3.7.dist-info → pydantic_ai_slim-0.4.1.dist-info}/WHEEL
File without changes

{pydantic_ai_slim-0.3.7.dist-info → pydantic_ai_slim-0.4.1.dist-info}/entry_points.txt
File without changes

{pydantic_ai_slim-0.3.7.dist-info → pydantic_ai_slim-0.4.1.dist-info}/licenses/LICENSE
File without changes