pydantic-ai-slim 0.3.6__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pydantic_ai/_function_schema.py +1 -1
- pydantic_ai/_griffe.py +2 -2
- pydantic_ai/_utils.py +4 -1
- pydantic_ai/agent.py +15 -7
- pydantic_ai/direct.py +191 -3
- pydantic_ai/ext/aci.py +66 -0
- pydantic_ai/ext/langchain.py +2 -2
- pydantic_ai/mcp.py +1 -1
- pydantic_ai/messages.py +36 -6
- pydantic_ai/models/__init__.py +11 -1
- pydantic_ai/models/anthropic.py +3 -4
- pydantic_ai/models/bedrock.py +11 -8
- pydantic_ai/models/cohere.py +2 -3
- pydantic_ai/models/gemini.py +3 -4
- pydantic_ai/models/google.py +19 -6
- pydantic_ai/models/groq.py +3 -4
- pydantic_ai/models/mcp_sampling.py +2 -3
- pydantic_ai/models/mistral.py +5 -4
- pydantic_ai/models/openai.py +6 -5
- pydantic_ai/profiles/openai.py +9 -1
- pydantic_ai/providers/__init__.py +4 -0
- pydantic_ai/providers/github.py +112 -0
- pydantic_ai/result.py +7 -1
- pydantic_ai/tools.py +5 -5
- {pydantic_ai_slim-0.3.6.dist-info → pydantic_ai_slim-0.4.0.dist-info}/METADATA +5 -5
- {pydantic_ai_slim-0.3.6.dist-info → pydantic_ai_slim-0.4.0.dist-info}/RECORD +29 -27
- {pydantic_ai_slim-0.3.6.dist-info → pydantic_ai_slim-0.4.0.dist-info}/WHEEL +0 -0
- {pydantic_ai_slim-0.3.6.dist-info → pydantic_ai_slim-0.4.0.dist-info}/entry_points.txt +0 -0
- {pydantic_ai_slim-0.3.6.dist-info → pydantic_ai_slim-0.4.0.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/_function_schema.py CHANGED
@@ -35,7 +35,7 @@ class FunctionSchema:
     """Internal information about a function schema."""

     function: Callable[..., Any]
-    description: str
+    description: str | None
     validator: SchemaValidator
     json_schema: ObjectJsonSchema
     # if not None, the function takes a single by that name (besides potentially `info`)
pydantic_ai/_griffe.py CHANGED
@@ -19,7 +19,7 @@ def doc_descriptions(
     sig: Signature,
     *,
     docstring_format: DocstringFormat,
-) -> tuple[str, dict[str, str]]:
+) -> tuple[str | None, dict[str, str]]:
     """Extract the function description and parameter descriptions from a function's docstring.

     The function parses the docstring using the specified format (or infers it if 'auto')
@@ -35,7 +35,7 @@ def doc_descriptions(
     """
     doc = func.__doc__
     if doc is None:
-        return
+        return None, {}

     # see https://github.com/mkdocstrings/griffe/issues/293
     parent = cast(GriffeObject, sig)
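Note: the old bare `return` made `doc_descriptions` yield `None` for functions without a docstring, which breaks tuple unpacking at call sites. A standalone sketch of the new contract (illustrative helper, not the library's internal code):

```python
import inspect


def doc_or_none(func) -> tuple[str | None, dict[str, str]]:
    """Return (description, param_descriptions), with both parts always present."""
    doc = inspect.getdoc(func)
    if doc is None:
        return None, {}  # explicit pair, so `desc, params = ...` can never fail
    return doc.splitlines()[0], {}


def undocumented(x: int) -> int:
    return x + 1


description, params = doc_or_none(undocumented)
assert description is None and params == {}
```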
pydantic_ai/_utils.py CHANGED
@@ -315,8 +315,11 @@ def dataclasses_no_defaults_repr(self: Any) -> str:
     return f'{self.__class__.__qualname__}({", ".join(kv_pairs)})'


+_datetime_ta = TypeAdapter(datetime)
+
+
 def number_to_datetime(x: int | float) -> datetime:
-    return
+    return _datetime_ta.validate_python(x)


 AwaitableCallable = Callable[..., Awaitable[T]]
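Note: hoisting the `TypeAdapter` to module level avoids rebuilding a pydantic-core validator on every call; construction is the expensive part, validation is cheap. A minimal sketch of the pattern (mirrors the change above; the epoch value is arbitrary):

```python
from datetime import datetime

from pydantic import TypeAdapter

_datetime_ta = TypeAdapter(datetime)  # validator built once, at import time


def number_to_datetime(x: int | float) -> datetime:
    return _datetime_ta.validate_python(x)  # reuses the cached validator


print(number_to_datetime(1_700_000_000))  # 2023-11-14 22:13:20+00:00
```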
pydantic_ai/agent.py CHANGED
@@ -296,7 +296,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         if 'result_type' in _deprecated_kwargs:
             if output_type is not str:  # pragma: no cover
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
-            warnings.warn('`result_type` is deprecated, use `output_type` instead', DeprecationWarning)
+            warnings.warn('`result_type` is deprecated, use `output_type` instead', DeprecationWarning, stacklevel=2)
             output_type = _deprecated_kwargs.pop('result_type')

         self.output_type = output_type
@@ -310,6 +310,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             warnings.warn(
                 '`result_tool_name` is deprecated, use `output_type` with `ToolOutput` instead',
                 DeprecationWarning,
+                stacklevel=2,
             )

         self._deprecated_result_tool_description = _deprecated_kwargs.pop('result_tool_description', None)
@@ -317,12 +318,15 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             warnings.warn(
                 '`result_tool_description` is deprecated, use `output_type` with `ToolOutput` instead',
                 DeprecationWarning,
+                stacklevel=2,
             )
         result_retries = _deprecated_kwargs.pop('result_retries', None)
         if result_retries is not None:
             if output_retries is not None:  # pragma: no cover
                 raise TypeError('`output_retries` and `result_retries` cannot be set at the same time.')
-            warnings.warn(
+            warnings.warn(
+                '`result_retries` is deprecated, use `max_result_retries` instead', DeprecationWarning, stacklevel=2
+            )
             output_retries = result_retries

         default_output_mode = (
@@ -472,7 +476,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         if 'result_type' in _deprecated_kwargs:  # pragma: no cover
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
-            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
+            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning, stacklevel=2)
             output_type = _deprecated_kwargs.pop('result_type')

         _utils.validate_empty_kwargs(_deprecated_kwargs)
@@ -640,7 +644,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         if 'result_type' in _deprecated_kwargs:  # pragma: no cover
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
-            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
+            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning, stacklevel=2)
             output_type = _deprecated_kwargs.pop('result_type')

         _utils.validate_empty_kwargs(_deprecated_kwargs)
@@ -879,7 +883,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         if 'result_type' in _deprecated_kwargs:  # pragma: no cover
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
-            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
+            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning, stacklevel=2)
             output_type = _deprecated_kwargs.pop('result_type')

         _utils.validate_empty_kwargs(_deprecated_kwargs)
@@ -997,7 +1001,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         if 'result_type' in _deprecated_kwargs:  # pragma: no cover
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
-            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
+            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning, stacklevel=2)
             output_type = _deprecated_kwargs.pop('result_type')

         _utils.validate_empty_kwargs(_deprecated_kwargs)
@@ -1336,7 +1340,11 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         return func

     @deprecated('`result_validator` is deprecated, use `output_validator` instead.')
-    def result_validator(self, func: Any, /) -> Any:
+    def result_validator(self, func: Any, /) -> Any:
+        warnings.warn(
+            '`result_validator` is deprecated, use `output_validator` instead.', DeprecationWarning, stacklevel=2
+        )
+        return self.output_validator(func)  # type: ignore

     @overload
     def tool(self, func: ToolFuncContext[AgentDepsT, ToolParams], /) -> ToolFuncContext[AgentDepsT, ToolParams]: ...
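Note: every `DeprecationWarning` in this file gains `stacklevel=2`, so the warning is attributed to the caller's line rather than to the `warnings.warn()` call inside pydantic-ai. A self-contained illustration (not from the package):

```python
import warnings


def deprecated_api() -> None:
    # stacklevel=2 skips this frame, so the reported location is the caller's
    warnings.warn('use new_api() instead', DeprecationWarning, stacklevel=2)


def caller() -> None:
    deprecated_api()  # the warning points at this line, not inside deprecated_api


warnings.simplefilter('always')
caller()
```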
pydantic_ai/direct.py CHANGED
@@ -8,14 +8,29 @@ These methods are thin wrappers around [`Model`][pydantic_ai.models.Model] imple

 from __future__ import annotations as _annotations

+import queue
+import threading
+from collections.abc import Iterator
 from contextlib import AbstractAsyncContextManager
+from dataclasses import dataclass, field
+from datetime import datetime
+from types import TracebackType

+from pydantic_ai.usage import Usage
 from pydantic_graph._utils import get_event_loop as _get_event_loop

 from . import agent, messages, models, settings
-from .models import instrumented as instrumented_models
+from .models import StreamedResponse, instrumented as instrumented_models

-__all__ =
+__all__ = (
+    'model_request',
+    'model_request_sync',
+    'model_request_stream',
+    'model_request_stream_sync',
+    'StreamedResponseSync',
+)
+
+STREAM_INITIALIZATION_TIMEOUT = 30


 async def model_request(
@@ -144,7 +159,7 @@ def model_request_stream(

     async def main():
         messages = [ModelRequest.user_text_prompt('Who was Albert Einstein?')]  # (1)!
-        async with model_request_stream(
+        async with model_request_stream('openai:gpt-4.1-mini', messages) as stream:
             chunks = []
             async for chunk in stream:
                 chunks.append(chunk)
@@ -181,6 +196,63 @@ def model_request_stream(
     )


+def model_request_stream_sync(
+    model: models.Model | models.KnownModelName | str,
+    messages: list[messages.ModelMessage],
+    *,
+    model_settings: settings.ModelSettings | None = None,
+    model_request_parameters: models.ModelRequestParameters | None = None,
+    instrument: instrumented_models.InstrumentationSettings | bool | None = None,
+) -> StreamedResponseSync:
+    """Make a streamed synchronous request to a model.
+
+    This is the synchronous version of [`model_request_stream`][pydantic_ai.direct.model_request_stream].
+    It uses threading to run the asynchronous stream in the background while providing a synchronous iterator interface.
+
+    ```py {title="model_request_stream_sync_example.py"}
+
+    from pydantic_ai.direct import model_request_stream_sync
+    from pydantic_ai.messages import ModelRequest
+
+    messages = [ModelRequest.user_text_prompt('Who was Albert Einstein?')]
+    with model_request_stream_sync('openai:gpt-4.1-mini', messages) as stream:
+        chunks = []
+        for chunk in stream:
+            chunks.append(chunk)
+        print(chunks)
+        '''
+        [
+            PartStartEvent(index=0, part=TextPart(content='Albert Einstein was ')),
+            PartDeltaEvent(
+                index=0, delta=TextPartDelta(content_delta='a German-born theoretical ')
+            ),
+            PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='physicist.')),
+        ]
+        '''
+    ```
+
+    Args:
+        model: The model to make a request to. We allow `str` here since the actual list of allowed models changes frequently.
+        messages: Messages to send to the model
+        model_settings: optional model settings
+        model_request_parameters: optional model request parameters
+        instrument: Whether to instrument the request with OpenTelemetry/Logfire, if `None` the value from
+            [`logfire.instrument_pydantic_ai`][logfire.Logfire.instrument_pydantic_ai] is used.
+
+    Returns:
+        A [sync stream response][pydantic_ai.direct.StreamedResponseSync] context manager.
+    """
+    async_stream_cm = model_request_stream(
+        model=model,
+        messages=messages,
+        model_settings=model_settings,
+        model_request_parameters=model_request_parameters,
+        instrument=instrument,
+    )
+
+    return StreamedResponseSync(async_stream_cm)
+
+
 def _prepare_model(
     model: models.Model | models.KnownModelName | str,
     instrument: instrumented_models.InstrumentationSettings | bool | None,
@@ -191,3 +263,119 @@ def _prepare_model(
         instrument = agent.Agent._instrument_default  # pyright: ignore[reportPrivateUsage]

     return instrumented_models.instrument_model(model_instance, instrument)
+
+
+@dataclass
+class StreamedResponseSync:
+    """Synchronous wrapper to async streaming responses by running the async producer in a background thread and providing a synchronous iterator.
+
+    This class must be used as a context manager with the `with` statement.
+    """
+
+    _async_stream_cm: AbstractAsyncContextManager[StreamedResponse]
+    _queue: queue.Queue[messages.ModelResponseStreamEvent | Exception | None] = field(
+        default_factory=queue.Queue, init=False
+    )
+    _thread: threading.Thread | None = field(default=None, init=False)
+    _stream_response: StreamedResponse | None = field(default=None, init=False)
+    _exception: Exception | None = field(default=None, init=False)
+    _context_entered: bool = field(default=False, init=False)
+    _stream_ready: threading.Event = field(default_factory=threading.Event, init=False)
+
+    def __enter__(self) -> StreamedResponseSync:
+        self._context_entered = True
+        self._start_producer()
+        return self
+
+    def __exit__(
+        self,
+        _exc_type: type[BaseException] | None,
+        _exc_val: BaseException | None,
+        _exc_tb: TracebackType | None,
+    ) -> None:
+        self._cleanup()
+
+    def __iter__(self) -> Iterator[messages.ModelResponseStreamEvent]:
+        """Stream the response as an iterable of [`ModelResponseStreamEvent`][pydantic_ai.messages.ModelResponseStreamEvent]s."""
+        self._check_context_manager_usage()
+
+        while True:
+            item = self._queue.get()
+            if item is None:  # End of stream
+                break
+            elif isinstance(item, Exception):
+                raise item
+            else:
+                yield item
+
+    def __repr__(self) -> str:
+        if self._stream_response:
+            return repr(self._stream_response)
+        else:
+            return f'{self.__class__.__name__}(context_entered={self._context_entered})'
+
+    __str__ = __repr__
+
+    def _check_context_manager_usage(self) -> None:
+        if not self._context_entered:
+            raise RuntimeError(
+                'StreamedResponseSync must be used as a context manager. '
+                'Use: `with model_request_stream_sync(...) as stream:`'
+            )
+
+    def _ensure_stream_ready(self) -> StreamedResponse:
+        self._check_context_manager_usage()
+
+        if self._stream_response is None:
+            # Wait for the background thread to signal that the stream is ready
+            if not self._stream_ready.wait(timeout=STREAM_INITIALIZATION_TIMEOUT):
+                raise RuntimeError('Stream failed to initialize within timeout')
+
+            if self._stream_response is None:  # pragma: no cover
+                raise RuntimeError('Stream failed to initialize')
+
+        return self._stream_response
+
+    def _start_producer(self):
+        self._thread = threading.Thread(target=self._async_producer, daemon=True)
+        self._thread.start()
+
+    def _async_producer(self):
+        async def _consume_async_stream():
+            try:
+                async with self._async_stream_cm as stream:
+                    self._stream_response = stream
+                    # Signal that the stream is ready
+                    self._stream_ready.set()
+                    async for event in stream:
+                        self._queue.put(event)
+            except Exception as e:
+                # Signal ready even on error so waiting threads don't hang
+                self._stream_ready.set()
+                self._queue.put(e)
+            finally:
+                self._queue.put(None)  # Signal end
+
+        _get_event_loop().run_until_complete(_consume_async_stream())
+
+    def _cleanup(self):
+        if self._thread and self._thread.is_alive():
+            self._thread.join()
+
+    def get(self) -> messages.ModelResponse:
+        """Build a ModelResponse from the data received from the stream so far."""
+        return self._ensure_stream_ready().get()
+
+    def usage(self) -> Usage:
+        """Get the usage of the response so far."""
+        return self._ensure_stream_ready().usage()
+
+    @property
+    def model_name(self) -> str:
+        """Get the model name of the response."""
+        return self._ensure_stream_ready().model_name

+    @property
+    def timestamp(self) -> datetime:
+        """Get the timestamp of the response."""
+        return self._ensure_stream_ready().timestamp
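Note: `StreamedResponseSync` bridges async streaming to a blocking iterator with a daemon thread and a queue. A stripped-down sketch of the same pattern (names are illustrative, not the class's internals):

```python
import asyncio
import queue
import threading
from collections.abc import AsyncIterator, Iterator


async def aiter_chunks() -> AsyncIterator[str]:
    for chunk in ('Albert ', 'Einstein'):
        await asyncio.sleep(0)  # stand-in for awaiting a model response
        yield chunk


def sync_chunks() -> Iterator[str]:
    q: queue.Queue[str | None] = queue.Queue()

    def producer() -> None:
        async def consume() -> None:
            async for chunk in aiter_chunks():
                q.put(chunk)

        try:
            asyncio.new_event_loop().run_until_complete(consume())
        finally:
            q.put(None)  # end-of-stream sentinel, as in the real wrapper

    threading.Thread(target=producer, daemon=True).start()
    while (item := q.get()) is not None:
        yield item


print(list(sync_chunks()))  # ['Albert ', 'Einstein']
```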
pydantic_ai/ext/aci.py ADDED
@@ -0,0 +1,66 @@
+# Checking whether aci-sdk is installed
+try:
+    from aci import ACI
+except ImportError as _import_error:
+    raise ImportError('Please install `aci-sdk` to use ACI.dev tools') from _import_error
+
+from typing import Any
+
+from aci import ACI
+
+from pydantic_ai import Tool
+
+
+def _clean_schema(schema):
+    if isinstance(schema, dict):
+        # Remove non-standard keys (e.g., 'visible')
+        return {k: _clean_schema(v) for k, v in schema.items() if k not in {'visible'}}
+    elif isinstance(schema, list):
+        return [_clean_schema(item) for item in schema]
+    else:
+        return schema
+
+
+def tool_from_aci(aci_function: str, linked_account_owner_id: str) -> Tool:
+    """Creates a Pydantic AI tool proxy from an ACI function.
+
+    Args:
+        aci_function: The ACI function to wrap.
+        linked_account_owner_id: The ACI user ID to execute the function on behalf of.
+
+    Returns:
+        A Pydantic AI tool that corresponds to the ACI.dev tool.
+    """
+    aci = ACI()
+    function_definition = aci.functions.get_definition(aci_function)
+    function_name = function_definition['function']['name']
+    function_description = function_definition['function']['description']
+    inputs = function_definition['function']['parameters']
+
+    json_schema = {
+        'additionalProperties': inputs.get('additionalProperties', False),
+        'properties': inputs.get('properties', {}),
+        'required': inputs.get('required', []),
+        # Default to 'object' if not specified
+        'type': inputs.get('type', 'object'),
+    }
+
+    # Clean the schema
+    json_schema = _clean_schema(json_schema)
+
+    def implementation(*args: Any, **kwargs: Any) -> str:
+        if args:
+            raise TypeError('Positional arguments are not allowed')
+        return aci.handle_function_call(
+            function_name,
+            kwargs,
+            linked_account_owner_id=linked_account_owner_id,
+            allowed_apps_only=True,
+        )
+
+    return Tool.from_schema(
+        function=implementation,
+        name=function_name,
+        description=function_description,
+        json_schema=json_schema,
+    )
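Note: an assumed usage sketch for the new module (requires `aci-sdk` installed and ACI credentials configured; the function name and account id below are hypothetical):

```python
from pydantic_ai import Agent
from pydantic_ai.ext.aci import tool_from_aci

search_tool = tool_from_aci(
    'TAVILY__SEARCH',  # hypothetical ACI function name
    linked_account_owner_id='user-123',  # hypothetical linked account id
)
agent = Agent('openai:gpt-4o', tools=[search_tool])
```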
pydantic_ai/ext/langchain.py CHANGED
@@ -27,13 +27,13 @@ __all__ = ('tool_from_langchain',)


 def tool_from_langchain(langchain_tool: LangChainTool) -> Tool:
-    """Creates a Pydantic tool proxy from a LangChain tool.
+    """Creates a Pydantic AI tool proxy from a LangChain tool.

     Args:
         langchain_tool: The LangChain tool to wrap.

     Returns:
-        A Pydantic tool that corresponds to the LangChain tool.
+        A Pydantic AI tool that corresponds to the LangChain tool.
     """
     function_name = langchain_tool.name
     function_description = langchain_tool.description
pydantic_ai/mcp.py CHANGED
@@ -98,7 +98,7 @@ class MCPServer(ABC):
         return [
             tools.ToolDefinition(
                 name=self.get_prefixed_tool_name(tool.name),
-                description=tool.description
+                description=tool.description,
                 parameters_json_schema=tool.inputSchema,
             )
             for tool in mcp_tools.tools
pydantic_ai/messages.py CHANGED
@@ -25,7 +25,7 @@ if TYPE_CHECKING:
     from .models.instrumented import InstrumentationSettings


-AudioMediaType: TypeAlias = Literal['audio/wav', 'audio/mpeg']
+AudioMediaType: TypeAlias = Literal['audio/wav', 'audio/mpeg', 'audio/ogg', 'audio/flac', 'audio/aiff', 'audio/aac']
 ImageMediaType: TypeAlias = Literal['image/jpeg', 'image/png', 'image/gif', 'image/webp']
 DocumentMediaType: TypeAlias = Literal[
     'application/pdf',
@@ -48,7 +48,7 @@ VideoMediaType: TypeAlias = Literal[
     'video/3gpp',
 ]

-AudioFormat: TypeAlias = Literal['wav', 'mp3']
+AudioFormat: TypeAlias = Literal['wav', 'mp3', 'oga', 'flac', 'aiff', 'aac']
 ImageFormat: TypeAlias = Literal['jpeg', 'png', 'gif', 'webp']
 DocumentFormat: TypeAlias = Literal['csv', 'doc', 'docx', 'html', 'md', 'pdf', 'txt', 'xls', 'xlsx']
 VideoFormat: TypeAlias = Literal['mkv', 'mov', 'mp4', 'webm', 'flv', 'mpeg', 'mpg', 'wmv', 'three_gp']
@@ -99,6 +99,13 @@ class FileUrl(ABC):
     * If False, the URL is sent directly to the model and no download is performed.
     """

+    vendor_metadata: dict[str, Any] | None = None
+    """Vendor-specific metadata for the file.
+
+    Supported by:
+    - `GoogleModel`: `VideoUrl.vendor_metadata` is used as `video_metadata`: https://ai.google.dev/gemini-api/docs/video-understanding#customize-video-processing
+    """
+
     @property
     @abstractmethod
     def media_type(self) -> str:
@@ -175,13 +182,25 @@ class AudioUrl(FileUrl):

     @property
     def media_type(self) -> AudioMediaType:
-        """Return the media type of the audio file, based on the url.
+        """Return the media type of the audio file, based on the url.
+
+        References:
+        - Gemini: https://ai.google.dev/gemini-api/docs/audio#supported-formats
+        """
         if self.url.endswith('.mp3'):
             return 'audio/mpeg'
-
+        if self.url.endswith('.wav'):
             return 'audio/wav'
-
-
+        if self.url.endswith('.flac'):
+            return 'audio/flac'
+        if self.url.endswith('.oga'):
+            return 'audio/ogg'
+        if self.url.endswith('.aiff'):
+            return 'audio/aiff'
+        if self.url.endswith('.aac'):
+            return 'audio/aac'
+
+        raise ValueError(f'Unknown audio file extension: {self.url}')

     @property
     def format(self) -> AudioFormat:
@@ -263,6 +282,13 @@ class BinaryContent:
     media_type: AudioMediaType | ImageMediaType | DocumentMediaType | str
     """The media type of the binary data."""

+    vendor_metadata: dict[str, Any] | None = None
+    """Vendor-specific metadata for the file.
+
+    Supported by:
+    - `GoogleModel`: `BinaryContent.vendor_metadata` is used as `video_metadata`: https://ai.google.dev/gemini-api/docs/video-understanding#customize-video-processing
+    """
+
     kind: Literal['binary'] = 'binary'
     """Type identifier, this is available on all parts as a discriminator."""

@@ -344,6 +370,10 @@ _document_format_lookup: dict[str, DocumentFormat] = {
 _audio_format_lookup: dict[str, AudioFormat] = {
     'audio/mpeg': 'mp3',
     'audio/wav': 'wav',
+    'audio/flac': 'flac',
+    'audio/ogg': 'oga',
+    'audio/aiff': 'aiff',
+    'audio/aac': 'aac',
 }
 _image_format_lookup: dict[str, ImageFormat] = {
     'image/jpeg': 'jpeg',
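Note: the widened media types mean URLs for the new audio formats now resolve instead of raising, and `vendor_metadata` flows through to Google models as `video_metadata`. The offset keys below follow the Gemini docs linked above (values are illustrative):

```python
from pydantic_ai.messages import AudioUrl, VideoUrl

audio = AudioUrl(url='https://example.com/podcast.flac')
print(audio.media_type, audio.format)  # audio/flac flac

video = VideoUrl(
    url='https://youtu.be/lCdaVNyHtjU',  # illustrative YouTube URL
    vendor_metadata={'start_offset': '30s', 'end_offset': '60s'},
)
```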
pydantic_ai/models/__init__.py CHANGED
@@ -569,7 +569,17 @@ def infer_model(model: Model | KnownModelName | str) -> Model:
         from .cohere import CohereModel

         return CohereModel(model_name, provider=provider)
-    elif provider in (
+    elif provider in (
+        'openai',
+        'deepseek',
+        'azure',
+        'openrouter',
+        'grok',
+        'fireworks',
+        'together',
+        'heroku',
+        'github',
+    ):
         from .openai import OpenAIModel

         return OpenAIModel(model_name, provider=provider)
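Note: with `'github'` now routed to `OpenAIModel`, a GitHub Models model can be inferred from a plain string. A sketch (the model id is illustrative; constructing the provider requires `GITHUB_API_KEY` to be set):

```python
from pydantic_ai.models import infer_model

model = infer_model('github:xai/grok-3-mini')  # provider prefix + model id
print(type(model).__name__)  # OpenAIModel
```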
pydantic_ai/models/anthropic.py CHANGED
@@ -90,10 +90,9 @@ See [the Anthropic docs](https://docs.anthropic.com/en/docs/about-claude/models)


 class AnthropicModelSettings(ModelSettings, total=False):
-    """Settings used for an Anthropic model request.
+    """Settings used for an Anthropic model request."""

-    ALL FIELDS MUST BE `anthropic_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
-    """
+    # ALL FIELDS MUST BE `anthropic_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.

     anthropic_metadata: BetaMetadataParam
     """An object describing metadata about the request.
@@ -417,7 +416,7 @@ class AnthropicModel(Model):
     def _map_tool_definition(f: ToolDefinition) -> BetaToolParam:
         return {
             'name': f.name,
-            'description': f.description,
+            'description': f.description or '',
             'input_schema': f.parameters_json_schema,
         }
pydantic_ai/models/bedrock.py CHANGED
@@ -62,6 +62,7 @@ if TYPE_CHECKING:
         SystemContentBlockTypeDef,
         ToolChoiceTypeDef,
         ToolConfigurationTypeDef,
+        ToolSpecificationTypeDef,
         ToolTypeDef,
         VideoBlockTypeDef,
     )
@@ -133,12 +134,12 @@ T = typing.TypeVar('T')
 class BedrockModelSettings(ModelSettings, total=False):
     """Settings for Bedrock models.

-    ALL FIELDS MUST BE `bedrock_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
-
     See [the Bedrock Converse API docs](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_Converse.html#API_runtime_Converse_RequestSyntax) for a full list.
     See [the boto3 implementation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/bedrock-runtime/client/converse.html) of the Bedrock Converse API.
     """

+    # ALL FIELDS MUST BE `bedrock_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
+
     bedrock_guardrail_config: GuardrailConfigurationTypeDef
     """Content moderation and safety settings for Bedrock API requests.

@@ -228,14 +229,16 @@ class BedrockConverseModel(Model):

     @staticmethod
     def _map_tool_definition(f: ToolDefinition) -> ToolTypeDef:
-
-        '
-
-            'description': f.description,
-            'inputSchema': {'json': f.parameters_json_schema},
-        }
+        tool_spec: ToolSpecificationTypeDef = {
+            'name': f.name,
+            'inputSchema': {'json': f.parameters_json_schema},
         }

+        if f.description:  # pragma: no branch
+            tool_spec['description'] = f.description
+
+        return {'toolSpec': tool_spec}
+
     @property
     def base_url(self) -> str:
         return str(self.client.meta.endpoint_url)
pydantic_ai/models/cohere.py CHANGED
@@ -83,10 +83,9 @@ See [Cohere's docs](https://docs.cohere.com/v2/docs/models) for a list of all av


 class CohereModelSettings(ModelSettings, total=False):
-    """Settings used for a Cohere model request.
+    """Settings used for a Cohere model request."""

-    ALL FIELDS MUST BE `cohere_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
-    """
+    # ALL FIELDS MUST BE `cohere_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.

     # This class is a placeholder for any future cohere-specific settings
pydantic_ai/models/gemini.py CHANGED
@@ -74,10 +74,9 @@ See [the Gemini API docs](https://ai.google.dev/gemini-api/docs/models/gemini#mo


 class GeminiModelSettings(ModelSettings, total=False):
-    """Settings used for a Gemini model request.
+    """Settings used for a Gemini model request."""

-    ALL FIELDS MUST BE `gemini_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
-    """
+    # ALL FIELDS MUST BE `gemini_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.

     gemini_safety_settings: list[GeminiSafetySettings]
     """Safety settings options for Gemini model request."""
@@ -774,7 +773,7 @@ class _GeminiFunction(TypedDict):

 def _function_from_abstract_tool(tool: ToolDefinition) -> _GeminiFunction:
     json_schema = tool.parameters_json_schema
-    f = _GeminiFunction(name=tool.name, description=tool.description, parameters=json_schema)
+    f = _GeminiFunction(name=tool.name, description=tool.description or '', parameters=json_schema)
     return f

pydantic_ai/models/google.py CHANGED
@@ -55,6 +55,7 @@ try:
     GenerateContentConfigDict,
     GenerateContentResponse,
     HttpOptionsDict,
+    MediaResolution,
     Part,
     PartDict,
     SafetySettingDict,
@@ -98,10 +99,9 @@ See [the Gemini API docs](https://ai.google.dev/gemini-api/docs/models/gemini#mo


 class GoogleModelSettings(ModelSettings, total=False):
-    """Settings used for a Gemini model request.
+    """Settings used for a Gemini model request."""

-    ALL FIELDS MUST BE `gemini_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
-    """
+    # ALL FIELDS MUST BE `gemini_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.

     google_safety_settings: list[SafetySettingDict]
     """The safety settings to use for the model.
@@ -121,6 +121,12 @@ class GoogleModelSettings(ModelSettings, total=False):
     See the [Gemini API docs](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/add-labels-to-api-calls) for use cases and limitations.
     """

+    google_video_resolution: MediaResolution
+    """The video resolution to use for the model.
+
+    See <https://ai.google.dev/api/generate-content#MediaResolution> for more information.
+    """
+

 @dataclass(init=False)
 class GoogleModel(Model):
@@ -292,6 +298,7 @@ class GoogleModel(Model):
             safety_settings=model_settings.get('google_safety_settings'),
             thinking_config=model_settings.get('google_thinking_config'),
             labels=model_settings.get('google_labels'),
+            media_resolution=model_settings.get('google_video_resolution'),
             tools=cast(ToolListUnionDict, tools),
             tool_config=tool_config,
             response_mime_type=response_mime_type,
@@ -399,9 +406,15 @@ class GoogleModel(Model):
         elif isinstance(item, BinaryContent):
             # NOTE: The type from Google GenAI is incorrect, it should be `str`, not `bytes`.
             base64_encoded = base64.b64encode(item.data).decode('utf-8')
-
+            inline_data_dict = {'inline_data': {'data': base64_encoded, 'mime_type': item.media_type}}
+            if item.vendor_metadata:
+                inline_data_dict['video_metadata'] = item.vendor_metadata
+            content.append(inline_data_dict)  # type: ignore
         elif isinstance(item, VideoUrl) and item.is_youtube:
-
+            file_data_dict = {'file_data': {'file_uri': item.url, 'mime_type': item.media_type}}
+            if item.vendor_metadata:
+                file_data_dict['video_metadata'] = item.vendor_metadata
+            content.append(file_data_dict)  # type: ignore
         elif isinstance(item, FileUrl):
             if self.system == 'google-gla' or item.force_download:
                 downloaded_item = await download_item(item, data_format='base64')
@@ -521,7 +534,7 @@ def _function_declaration_from_tool(tool: ToolDefinition) -> FunctionDeclaration
     json_schema = tool.parameters_json_schema
     f = FunctionDeclarationDict(
         name=tool.name,
-        description=tool.description,
+        description=tool.description or '',
         parameters=json_schema,  # type: ignore
     )
     return f
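Note: a hedged sketch of the new setting (the enum member name is taken from the google-genai SDK and assumed here):

```python
from google.genai.types import MediaResolution

from pydantic_ai.models.google import GoogleModelSettings

settings = GoogleModelSettings(
    google_video_resolution=MediaResolution.MEDIA_RESOLUTION_LOW,  # assumed member name
)
```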
pydantic_ai/models/groq.py CHANGED
@@ -93,10 +93,9 @@ See <https://console.groq.com/docs/models> for an up to date list of models


 class GroqModelSettings(ModelSettings, total=False):
-    """Settings used for a Groq model request.
+    """Settings used for a Groq model request."""

-    ALL FIELDS MUST BE `groq_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
-    """
+    # ALL FIELDS MUST BE `groq_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.

     groq_reasoning_format: Literal['hidden', 'raw', 'parsed']

@@ -334,7 +333,7 @@ class GroqModel(Model):
             'type': 'function',
             'function': {
                 'name': f.name,
-                'description': f.description,
+                'description': f.description or '',
                 'parameters': f.parameters_json_schema,
             },
         }
pydantic_ai/models/mcp_sampling.py CHANGED
@@ -16,10 +16,9 @@ if TYPE_CHECKING:


 class MCPSamplingModelSettings(ModelSettings, total=False):
-    """Settings used for an MCP Sampling model request.
+    """Settings used for an MCP Sampling model request."""

-    ALL FIELDS MUST BE `mcp_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
-    """
+    # ALL FIELDS MUST BE `mcp_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.

     mcp_model_preferences: ModelPreferences
     """Model preferences to use for MCP Sampling."""
pydantic_ai/models/mistral.py CHANGED
@@ -96,10 +96,9 @@ Since [the Mistral docs](https://docs.mistral.ai/getting-started/models/models_o


 class MistralModelSettings(ModelSettings, total=False):
-    """Settings used for a Mistral model request.
+    """Settings used for a Mistral model request."""

-    ALL FIELDS MUST BE `mistral_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
-    """
+    # ALL FIELDS MUST BE `mistral_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.

     # This class is a placeholder for any future mistral-specific settings

@@ -307,7 +306,9 @@ class MistralModel(Model):
         )
         tools = [
             MistralTool(
-                function=MistralFunction(
+                function=MistralFunction(
+                    name=r.name, parameters=r.parameters_json_schema, description=r.description or ''
+                )
             )
             for r in all_tools
         ]
pydantic_ai/models/openai.py CHANGED
@@ -96,10 +96,9 @@ OpenAISystemPromptRole = Literal['system', 'developer', 'user']


 class OpenAIModelSettings(ModelSettings, total=False):
-    """Settings used for an OpenAI model request.
+    """Settings used for an OpenAI model request."""

-    ALL FIELDS MUST BE `openai_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
-    """
+    # ALL FIELDS MUST BE `openai_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.

     openai_reasoning_effort: ReasoningEffort
     """Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
@@ -190,7 +189,9 @@ class OpenAIModel(Model):
         self,
         model_name: OpenAIModelName,
         *,
-        provider: Literal[
+        provider: Literal[
+            'openai', 'deepseek', 'azure', 'openrouter', 'grok', 'fireworks', 'together', 'heroku', 'github'
+        ]
         | Provider[AsyncOpenAI] = 'openai',
         profile: ModelProfileSpec | None = None,
         system_prompt_role: OpenAISystemPromptRole | None = None,
@@ -468,7 +469,7 @@ class OpenAIModel(Model):
             'type': 'function',
             'function': {
                 'name': f.name,
-                'description': f.description,
+                'description': f.description or '',
                 'parameters': f.parameters_json_schema,
             },
         }
pydantic_ai/profiles/openai.py CHANGED
@@ -93,10 +93,18 @@ class OpenAIJsonSchemaTransformer(JsonSchemaTransformer):
     def transform(self, schema: JsonSchema) -> JsonSchema:  # noqa C901
         # Remove unnecessary keys
         schema.pop('title', None)
-        schema.pop('default', None)
         schema.pop('$schema', None)
         schema.pop('discriminator', None)

+        default = schema.get('default', _sentinel)
+        if default is not _sentinel:
+            # the "default" keyword is not allowed in strict mode, but including it makes some Ollama models behave
+            # better, so we keep it around when not strict
+            if self.strict is True:
+                schema.pop('default', None)
+            elif self.strict is None:  # pragma: no branch
+                self.is_strict_compatible = False
+
         if schema_ref := schema.get('$ref'):
             if schema_ref == self.root_ref:
                 schema['$ref'] = '#'
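Note: the transform now keeps `default` unless strict mode demands its removal, and otherwise flags the schema as not strict-compatible. A standalone behavior sketch (not the transformer class itself):

```python
_sentinel = object()


def handle_default(schema: dict, strict: bool | None) -> tuple[dict, bool]:
    strict_compatible = True
    if schema.get('default', _sentinel) is not _sentinel:
        if strict is True:
            schema.pop('default', None)  # 'default' is not allowed in strict mode
        elif strict is None:
            strict_compatible = False  # keep it, but record the incompatibility
    return schema, strict_compatible


print(handle_default({'type': 'integer', 'default': 3}, strict=True))
# ({'type': 'integer'}, True)
print(handle_default({'type': 'integer', 'default': 3}, strict=None))
# ({'type': 'integer', 'default': 3}, False)
```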
pydantic_ai/providers/__init__.py CHANGED
@@ -111,6 +111,10 @@ def infer_provider_class(provider: str) -> type[Provider[Any]]:  # noqa: C901
         from .heroku import HerokuProvider

         return HerokuProvider
+    elif provider == 'github':
+        from .github import GitHubProvider
+
+        return GitHubProvider
     else:  # pragma: no cover
         raise ValueError(f'Unknown provider: {provider}')
pydantic_ai/providers/github.py ADDED
@@ -0,0 +1,112 @@
+from __future__ import annotations as _annotations
+
+import os
+from typing import overload
+
+from httpx import AsyncClient as AsyncHTTPClient
+
+from pydantic_ai.exceptions import UserError
+from pydantic_ai.models import cached_async_http_client
+from pydantic_ai.profiles import ModelProfile
+from pydantic_ai.profiles.cohere import cohere_model_profile
+from pydantic_ai.profiles.deepseek import deepseek_model_profile
+from pydantic_ai.profiles.grok import grok_model_profile
+from pydantic_ai.profiles.meta import meta_model_profile
+from pydantic_ai.profiles.mistral import mistral_model_profile
+from pydantic_ai.profiles.openai import OpenAIJsonSchemaTransformer, OpenAIModelProfile, openai_model_profile
+from pydantic_ai.providers import Provider
+
+try:
+    from openai import AsyncOpenAI
+except ImportError as _import_error:  # pragma: no cover
+    raise ImportError(
+        'Please install the `openai` package to use the GitHub Models provider, '
+        'you can use the `openai` optional group — `pip install "pydantic-ai-slim[openai]"`'
+    ) from _import_error
+
+
+class GitHubProvider(Provider[AsyncOpenAI]):
+    """Provider for GitHub Models API.
+
+    GitHub Models provides access to various AI models through an OpenAI-compatible API.
+    See <https://docs.github.com/en/github-models> for more information.
+    """
+
+    @property
+    def name(self) -> str:
+        return 'github'
+
+    @property
+    def base_url(self) -> str:
+        return 'https://models.github.ai/inference'
+
+    @property
+    def client(self) -> AsyncOpenAI:
+        return self._client
+
+    def model_profile(self, model_name: str) -> ModelProfile | None:
+        provider_to_profile = {
+            'xai': grok_model_profile,
+            'meta': meta_model_profile,
+            'microsoft': openai_model_profile,
+            'mistral-ai': mistral_model_profile,
+            'cohere': cohere_model_profile,
+            'deepseek': deepseek_model_profile,
+        }
+
+        profile = None
+
+        # If the model name does not contain a provider prefix, we assume it's an OpenAI model
+        if '/' not in model_name:
+            return openai_model_profile(model_name)
+
+        provider, model_name = model_name.lower().split('/', 1)
+        if provider in provider_to_profile:
+            model_name, *_ = model_name.split(':', 1)  # drop tags
+            profile = provider_to_profile[provider](model_name)
+
+        # As GitHubProvider is always used with OpenAIModel, which used to unconditionally use OpenAIJsonSchemaTransformer,
+        # we need to maintain that behavior unless json_schema_transformer is set explicitly
+        return OpenAIModelProfile(json_schema_transformer=OpenAIJsonSchemaTransformer).update(profile)
+
+    @overload
+    def __init__(self) -> None: ...
+
+    @overload
+    def __init__(self, *, api_key: str) -> None: ...
+
+    @overload
+    def __init__(self, *, api_key: str, http_client: AsyncHTTPClient) -> None: ...
+
+    @overload
+    def __init__(self, *, openai_client: AsyncOpenAI | None = None) -> None: ...
+
+    def __init__(
+        self,
+        *,
+        api_key: str | None = None,
+        openai_client: AsyncOpenAI | None = None,
+        http_client: AsyncHTTPClient | None = None,
+    ) -> None:
+        """Create a new GitHub Models provider.
+
+        Args:
+            api_key: The GitHub token to use for authentication. If not provided, the `GITHUB_API_KEY`
+                environment variable will be used if available.
+            openai_client: An existing `AsyncOpenAI` client to use. If provided, `api_key` and `http_client` must be `None`.
+            http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
+        """
+        api_key = api_key or os.getenv('GITHUB_API_KEY')
+        if not api_key and openai_client is None:
+            raise UserError(
+                'Set the `GITHUB_API_KEY` environment variable or pass it via `GitHubProvider(api_key=...)`'
+                ' to use the GitHub Models provider.'
+            )
+
+        if openai_client is not None:
+            self._client = openai_client
+        elif http_client is not None:
+            self._client = AsyncOpenAI(base_url=self.base_url, api_key=api_key, http_client=http_client)
+        else:
+            http_client = cached_async_http_client(provider='github')
+            self._client = AsyncOpenAI(base_url=self.base_url, api_key=api_key, http_client=http_client)
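Note: an assumed usage sketch for the new provider (the model id is illustrative; requires a GitHub token in `GITHUB_API_KEY`):

```python
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIModel
from pydantic_ai.providers.github import GitHubProvider

model = OpenAIModel('meta/Llama-4-Scout-17B-16E-Instruct', provider=GitHubProvider())
agent = Agent(model)
```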
pydantic_ai/result.py CHANGED
@@ -59,7 +59,12 @@ class AgentStream(Generic[AgentDepsT, OutputDataT]):
         """Asynchronously stream the (validated) agent outputs."""
         async for response in self.stream_responses(debounce_by=debounce_by):
             if self._final_result_event is not None:
-
+                try:
+                    yield await self._validate_response(
+                        response, self._final_result_event.tool_name, allow_partial=True
+                    )
+                except ValidationError:
+                    pass
         if self._final_result_event is not None:  # pragma: no branch
             yield await self._validate_response(
                 self._raw_stream_response.get(), self._final_result_event.tool_name, allow_partial=False
@@ -546,6 +551,7 @@ def coalesce_deprecated_return_content(
         warnings.warn(
             '`result_tool_return_content` is deprecated, use `output_tool_return_content` instead.',
             DeprecationWarning,
+            stacklevel=3,
         )
         return result_tool_return_content
     return output_tool_return_content
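Note: the streaming change means a partial response that fails validation is now skipped rather than aborting the stream; only the final response must validate. An analogy in plain pydantic (not the `AgentStream` code):

```python
from pydantic import TypeAdapter, ValidationError

ta = TypeAdapter(dict[str, int])


def stream_validated(partials: list[dict], final: dict):
    for p in partials:
        try:
            yield ta.validate_python(p)  # tolerate failures on partial data
        except ValidationError:
            pass
    yield ta.validate_python(final)  # the final result is validated strictly


print(list(stream_validated([{'a': 1}, {'a': 'not-an-int'}], {'a': 2})))
# [{'a': 1}, {'a': 2}]
```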
pydantic_ai/tools.py CHANGED
@@ -161,7 +161,7 @@ class Tool(Generic[AgentDepsT]):
     takes_ctx: bool
     max_retries: int | None
     name: str
-    description: str
+    description: str | None
     prepare: ToolPrepareFunc[AgentDepsT] | None
     docstring_format: DocstringFormat
     require_parameter_descriptions: bool
@@ -269,7 +269,7 @@ class Tool(Generic[AgentDepsT]):
         cls,
         function: Callable[..., Any],
         name: str,
-        description: str,
+        description: str | None,
         json_schema: JsonSchemaValue,
     ) -> Self:
         """Creates a Pydantic tool from a function and a JSON schema.
@@ -440,12 +440,12 @@ class ToolDefinition:
     name: str
     """The name of the tool."""

-    description: str
-    """The description of the tool."""
-
     parameters_json_schema: ObjectJsonSchema
     """The JSON schema for the tool's parameters."""

+    description: str | None = None
+    """The description of the tool."""
+
     outer_typed_dict_key: str | None = None
     """The key in the outer [TypedDict] that wraps an output tool.
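Note: `description` moved below `parameters_json_schema` because dataclass fields with defaults must follow fields without them; making it optional forced the reorder. A minimal illustration (stand-in class, not the real `ToolDefinition`):

```python
from dataclasses import dataclass


@dataclass
class ToolDefinitionSketch:
    name: str
    parameters_json_schema: dict
    description: str | None = None  # defaulted fields must come last


td = ToolDefinitionSketch(name='add', parameters_json_schema={'type': 'object'})
assert td.description is None
```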
{pydantic_ai_slim-0.3.6.dist-info → pydantic_ai_slim-0.4.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.
+Version: 0.4.0
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
 License-Expression: MIT
@@ -30,11 +30,11 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.
+Requires-Dist: pydantic-graph==0.4.0
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
-Requires-Dist: fasta2a==0.
+Requires-Dist: fasta2a==0.4.0; extra == 'a2a'
 Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.52.0; extra == 'anthropic'
 Provides-Extra: bedrock
@@ -48,9 +48,9 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.
+Requires-Dist: pydantic-evals==0.4.0; extra == 'evals'
 Provides-Extra: google
-Requires-Dist: google-genai>=1.
+Requires-Dist: google-genai>=1.24.0; extra == 'google'
 Provides-Extra: groq
 Requires-Dist: groq>=0.19.0; extra == 'groq'
 Provides-Extra: logfire
{pydantic_ai_slim-0.3.6.dist-info → pydantic_ai_slim-0.4.0.dist-info}/RECORD CHANGED
@@ -3,46 +3,47 @@ pydantic_ai/__main__.py,sha256=Q_zJU15DUA01YtlJ2mnaLCoId2YmgmreVEERGuQT-Y0,132
 pydantic_ai/_a2a.py,sha256=8nNtx6GENDt2Ej3f1ui9L-FuNQBYVELpJFfwz-y7fUw,7234
 pydantic_ai/_agent_graph.py,sha256=rtzyBXN4bzEDBeRkRwF031ORktSMbuGz9toZmSqUxNI,42153
 pydantic_ai/_cli.py,sha256=R-sE-9gYqPxV5-5utso4g-bzAKMiTCdo33XOVqE0ZEg,13206
-pydantic_ai/_function_schema.py,sha256=
-pydantic_ai/_griffe.py,sha256=
+pydantic_ai/_function_schema.py,sha256=BZus5y51eqiGQKxQIcCiDoSPml3AtAb12-st_aujU2k,10813
+pydantic_ai/_griffe.py,sha256=Ugft16ZHw9CN_6-lW0Svn6jESK9zHXO_x4utkGBkbBI,5253
 pydantic_ai/_mcp.py,sha256=PuvwnlLjv7YYOa9AZJCrklevBug99zGMhwJCBGG7BHQ,5626
 pydantic_ai/_output.py,sha256=8qOx2hEwxpcoS5P8OLqOAWj94KfODDVqrPHnEIhI-90,33164
 pydantic_ai/_parts_manager.py,sha256=Lioi8b7Nfyax09yQu8jTkMzxd26dYDrdAqhYvjRSKqQ,16182
 pydantic_ai/_run_context.py,sha256=zNkSyiQSH-YweO39ii3iB2taouUOodo3sTjz2Lrj4Pc,1792
 pydantic_ai/_system_prompt.py,sha256=lUSq-gDZjlYTGtd6BUm54yEvTIvgdwBmJ8mLsNZZtYU,1142
 pydantic_ai/_thinking_part.py,sha256=mzx2RZSfiQxAKpljEflrcXRXmFKxtp6bKVyorY3UYZk,1554
-pydantic_ai/_utils.py,sha256=
-pydantic_ai/agent.py,sha256=
-pydantic_ai/direct.py,sha256=
+pydantic_ai/_utils.py,sha256=SGXEiGCnMae1Iz_eZKUs6ni_tGMPkDaJ4W3W3YMoP5w,15545
+pydantic_ai/agent.py,sha256=Fs-bm9eeCvanwiKiD-IS_XLcMmgNWucJylXgrIDH6WM,96186
+pydantic_ai/direct.py,sha256=WRfgke3zm-eeR39LTuh9XI2TrdHXAqO81eDvFwih4Ko,14803
 pydantic_ai/exceptions.py,sha256=IdFw594Ou7Vn4YFa7xdZ040_j_6nmyA3MPANbC7sys4,3175
 pydantic_ai/format_as_xml.py,sha256=IINfh1evWDphGahqHNLBArB5dQ4NIqS3S-kru35ztGg,372
 pydantic_ai/format_prompt.py,sha256=qdKep95Sjlr7u1-qag4JwPbjoURbG0GbeU_l5ODTNw4,4466
-pydantic_ai/mcp.py,sha256=
-pydantic_ai/messages.py,sha256=
+pydantic_ai/mcp.py,sha256=6RvxXIn6bUlL2XWpX69i8G3atU-HLLZBgKc93dYqeVo,21830
+pydantic_ai/messages.py,sha256=ykB4jzDwPGFkgQSJagOdurBv5-DTtCaY-y9671FYz7E,39256
 pydantic_ai/output.py,sha256=gq-8H2YKgbKSTxp_HUMym57ZUkwupHyS4sCOzedlXTI,9315
 pydantic_ai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pydantic_ai/result.py,sha256=
+pydantic_ai/result.py,sha256=GVzXf7yjR2lKBDw9k-8PlhJgCpE3dVHiyLL0dFPvs7I,25603
 pydantic_ai/settings.py,sha256=yuUZ7-GkdPB-Gbx71kSdh8dSr6gwM9gEwk84qNxPO_I,3552
-pydantic_ai/tools.py,sha256=
+pydantic_ai/tools.py,sha256=ZZ5DZMzSLMZkM9y_G3fx5YnVTki6daPYgRkfuNXAQ-M,17774
 pydantic_ai/usage.py,sha256=35YPmItlzfNOwP35Rhh0qBUOlg5On5rUE7xqHQWrpaU,5596
 pydantic_ai/common_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/common_tools/duckduckgo.py,sha256=Ty9tu1rCwMfGKgz1JAaC2q_4esmL6QvpkHQUN8F0Ecc,2152
 pydantic_ai/common_tools/tavily.py,sha256=Q1xxSF5HtXAaZ10Pp-OaDOHXwJf2mco9wScGEQXD7E4,2495
 pydantic_ai/ext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pydantic_ai/ext/
-pydantic_ai/
-pydantic_ai/models/
-pydantic_ai/models/
-pydantic_ai/models/
+pydantic_ai/ext/aci.py,sha256=eiuWamUh90kexWyuGw_Fw2kM-EAA6Pv-IfNhf5hQ8fs,2123
+pydantic_ai/ext/langchain.py,sha256=iSyACZiJDDvxr0BKYl9dLxe4BPezCBHxgz_2Vk3W-Ak,1973
+pydantic_ai/models/__init__.py,sha256=B8vG0crUDCO3Bvd8fVeMNPzZH2Un61rEJFxSaumoUl4,29101
+pydantic_ai/models/anthropic.py,sha256=ooRh6Yh0jLj78IKjgaYTN0UbB2Ku8ZhuEBi8v8kymoE,23679
+pydantic_ai/models/bedrock.py,sha256=i8BNOFEYGiRYA4ZEFwqHzJHf3EP54akVzZHdEUJohiw,29234
+pydantic_ai/models/cohere.py,sha256=qgYegjfOsqXbRcjXCbg0jaexbuxh1SrS9_mZdzzJVbM,12623
 pydantic_ai/models/fallback.py,sha256=sTYw8wW8iGgFIPG2Oynsucb9orG6wbV_h-9k5vKil4I,5103
 pydantic_ai/models/function.py,sha256=nfCjRmbcF7sdK_nsak1fvzz9Xkptx5WhsxvWdB02zec,12113
-pydantic_ai/models/gemini.py,sha256=
-pydantic_ai/models/google.py,sha256=
-pydantic_ai/models/groq.py,sha256=
+pydantic_ai/models/gemini.py,sha256=22qucwayi8x20yvZY6qeHH4WRyEObfIkrCQ5cluejdQ,38488
+pydantic_ai/models/google.py,sha256=PFioCPeuf5_f80s9NiRSxFZawvfYbUUhpaW7mUg8frg,24072
+pydantic_ai/models/groq.py,sha256=tmYTPKsMMhtIms_9muPKYQvGZ98b_kax7t8H1YE1vPU,18500
 pydantic_ai/models/instrumented.py,sha256=olTa7Fl2BwHLvTLT6sSrS2HOS7UyWg182Xujx8hutBw,15947
-pydantic_ai/models/mcp_sampling.py,sha256=
-pydantic_ai/models/mistral.py,sha256=
-pydantic_ai/models/openai.py,sha256=
+pydantic_ai/models/mcp_sampling.py,sha256=q9nnjNEAAbhrfRc_Qw5z9TtCHMG_SwlCWW9FvKWjh8k,3395
+pydantic_ai/models/mistral.py,sha256=d_TQjSQukSztNt6JpFQCqugYTxXQ97GaQBc3zUxOSSA,30555
+pydantic_ai/models/openai.py,sha256=ReqpM4gdM0TPSwUCGu2L8VoBFsxy2Y-8PRFhI6d5KcI,53646
 pydantic_ai/models/test.py,sha256=STNd79ZoCyyphm0eFRNDoTpvkOzhw1qFw1zgv44kqsg,17441
 pydantic_ai/models/wrapper.py,sha256=2g06TxE5kFqfaJCwsDJHp7Rltoj0XXH0OzdpRDOcqNo,1861
 pydantic_ai/profiles/__init__.py,sha256=BXMqUpgRfosmYgcxjKAI9ESCj47JTSa30DhKXEgVLzM,2419
@@ -55,15 +56,16 @@ pydantic_ai/profiles/google.py,sha256=DJ0otpkCgVIrjwV2lzAUAejw8ivwZT9pNAY_sGRcrV
 pydantic_ai/profiles/grok.py,sha256=nBOxOCYCK9aiLmz2Q-esqYhotNbbBC1boAoOYIk1tVw,211
 pydantic_ai/profiles/meta.py,sha256=IAGPoUrLWd-g9ajAgpWp9fIeOrP-7dBlZ2HEFjIhUbY,334
 pydantic_ai/profiles/mistral.py,sha256=ll01PmcK3szwlTfbaJLQmfd0TADN8lqjov9HpPJzCMQ,217
-pydantic_ai/profiles/openai.py,sha256=
+pydantic_ai/profiles/openai.py,sha256=wFFtzbM22HbxxRNDXYEs6tr6_RSbv8xN_xBPz6RsP9s,6698
 pydantic_ai/profiles/qwen.py,sha256=u7pL8uomoQTVl45g5wDrHx0P_oFDLaN6ALswuwmkWc0,334
-pydantic_ai/providers/__init__.py,sha256=
+pydantic_ai/providers/__init__.py,sha256=JNsVZ1PBx_9hUJZbnoRIDJCkWbrJbk69w-SFqjoG-6c,3654
 pydantic_ai/providers/anthropic.py,sha256=D35UXxCPXv8yIbD0fj9Zg2FvNyoMoJMeDUtVM8Sn78I,3046
 pydantic_ai/providers/azure.py,sha256=y77IHGiSQ9Ttx9f4SGMgdpin2Daq6eYyzUdM9ET22RQ,5819
 pydantic_ai/providers/bedrock.py,sha256=ycdTXnkj_WNqPMA7DNDPeYia0C37FP0_l0CygSQmWYI,5694
 pydantic_ai/providers/cohere.py,sha256=LT6QaLPJBBlFUgYgXQOfKpbM9SXLzorWFxI7jNfOX_4,2892
 pydantic_ai/providers/deepseek.py,sha256=kUdM8eVp1lse4bS_uy70Gy7wgog94NTZ36GY-vhSB50,3060
 pydantic_ai/providers/fireworks.py,sha256=TPbqOpNgXG59qovBaHWbbV2vsvROwlHwQ3PvqHUBH-s,3626
+pydantic_ai/providers/github.py,sha256=zPu3oVJKjUE4zIqZ0YfgcTFBNdEy5rIBrSOdPCHJEG4,4406
 pydantic_ai/providers/google.py,sha256=eAELGtZDArdmYMVnyHLqJdOMBvMd_qLGUa4m1TSKWso,5994
 pydantic_ai/providers/google_gla.py,sha256=BCF5_6EVtpkCZ6qIDuvgY1Qa9EirS71l51CBqPqk4C4,1825
 pydantic_ai/providers/google_vertex.py,sha256=_uiPHisYbQJxygESUUsRKBIG-DjeTwEQVvioS4JpEXc,9446
@@ -74,8 +76,8 @@ pydantic_ai/providers/mistral.py,sha256=EIUSENjFuGzBhvbdrarUTM4VPkesIMnZrzfnEKHO
 pydantic_ai/providers/openai.py,sha256=7iGij0EaFylab7dTZAZDgXr78tr-HsZrn9EI9AkWBNQ,3091
 pydantic_ai/providers/openrouter.py,sha256=NXjNdnlXIBrBMMqbzcWQnowXOuZh4NHikXenBn5h3mc,4061
 pydantic_ai/providers/together.py,sha256=zFVSMSm5jXbpkNouvBOTjWrPmlPpCp6sQS5LMSyVjrQ,3482
-pydantic_ai_slim-0.
-pydantic_ai_slim-0.
-pydantic_ai_slim-0.
-pydantic_ai_slim-0.
-pydantic_ai_slim-0.
+pydantic_ai_slim-0.4.0.dist-info/METADATA,sha256=S-ygqOZ0lpsazK_VGyrj8B6l1H9Q7B2bCGYRtmmK4T8,3846
+pydantic_ai_slim-0.4.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pydantic_ai_slim-0.4.0.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
+pydantic_ai_slim-0.4.0.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
+pydantic_ai_slim-0.4.0.dist-info/RECORD,,

{pydantic_ai_slim-0.3.6.dist-info → pydantic_ai_slim-0.4.0.dist-info}/WHEEL: file without changes
{pydantic_ai_slim-0.3.6.dist-info → pydantic_ai_slim-0.4.0.dist-info}/entry_points.txt: file without changes
{pydantic_ai_slim-0.3.6.dist-info → pydantic_ai_slim-0.4.0.dist-info}/licenses/LICENSE: file without changes