pydantic-ai-slim 0.3.6__tar.gz → 0.3.7__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pydantic-ai-slim might be problematic.
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/PKG-INFO +5 -5
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/agent.py +15 -7
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/direct.py +191 -3
- pydantic_ai_slim-0.3.7/pydantic_ai/ext/aci.py +66 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/ext/langchain.py +2 -2
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/messages.py +14 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/__init__.py +11 -1
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/anthropic.py +2 -3
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/bedrock.py +2 -2
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/cohere.py +2 -3
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/gemini.py +2 -3
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/google.py +18 -5
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/groq.py +2 -3
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/mcp_sampling.py +2 -3
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/mistral.py +2 -3
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/openai.py +5 -4
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/__init__.py +4 -0
- pydantic_ai_slim-0.3.7/pydantic_ai/providers/github.py +112 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/result.py +7 -1
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pyproject.toml +1 -1
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/.gitignore +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/LICENSE +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/README.md +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/__main__.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/_a2a.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/_agent_graph.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/_cli.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/_function_schema.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/_mcp.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/_output.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/_parts_manager.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/_run_context.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/_thinking_part.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/_utils.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/ext/__init__.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/format_as_xml.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/format_prompt.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/mcp.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/fallback.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/function.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/instrumented.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/test.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/output.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/__init__.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/_json_schema.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/amazon.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/anthropic.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/cohere.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/deepseek.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/google.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/grok.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/meta.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/mistral.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/openai.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/qwen.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/anthropic.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/azure.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/cohere.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/fireworks.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/google.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/grok.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/groq.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/heroku.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/mistral.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/openai.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/openrouter.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/together.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/tools.py +0 -0
- {pydantic_ai_slim-0.3.6 → pydantic_ai_slim-0.3.7}/pydantic_ai/usage.py +0 -0
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.3.6
+Version: 0.3.7
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
 License-Expression: MIT
@@ -30,11 +30,11 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.3.6
+Requires-Dist: pydantic-graph==0.3.7
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
-Requires-Dist: fasta2a==0.3.6; extra == 'a2a'
+Requires-Dist: fasta2a==0.3.7; extra == 'a2a'
 Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.52.0; extra == 'anthropic'
 Provides-Extra: bedrock
@@ -48,9 +48,9 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == 'cohere'
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.3.6; extra == 'evals'
+Requires-Dist: pydantic-evals==0.3.7; extra == 'evals'
 Provides-Extra: google
-Requires-Dist: google-genai>=1.
+Requires-Dist: google-genai>=1.24.0; extra == 'google'
 Provides-Extra: groq
 Requires-Dist: groq>=0.19.0; extra == 'groq'
 Provides-Extra: logfire
--- a/pydantic_ai/agent.py
+++ b/pydantic_ai/agent.py
@@ -296,7 +296,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         if 'result_type' in _deprecated_kwargs:
             if output_type is not str:  # pragma: no cover
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
-            warnings.warn('`result_type` is deprecated, use `output_type` instead', DeprecationWarning)
+            warnings.warn('`result_type` is deprecated, use `output_type` instead', DeprecationWarning, stacklevel=2)
             output_type = _deprecated_kwargs.pop('result_type')

         self.output_type = output_type
@@ -310,6 +310,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             warnings.warn(
                 '`result_tool_name` is deprecated, use `output_type` with `ToolOutput` instead',
                 DeprecationWarning,
+                stacklevel=2,
             )

         self._deprecated_result_tool_description = _deprecated_kwargs.pop('result_tool_description', None)
@@ -317,12 +318,15 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             warnings.warn(
                 '`result_tool_description` is deprecated, use `output_type` with `ToolOutput` instead',
                 DeprecationWarning,
+                stacklevel=2,
             )
         result_retries = _deprecated_kwargs.pop('result_retries', None)
         if result_retries is not None:
             if output_retries is not None:  # pragma: no cover
                 raise TypeError('`output_retries` and `result_retries` cannot be set at the same time.')
-            warnings.warn('`result_retries` is deprecated, use `max_result_retries` instead', DeprecationWarning)
+            warnings.warn(
+                '`result_retries` is deprecated, use `max_result_retries` instead', DeprecationWarning, stacklevel=2
+            )
             output_retries = result_retries

         default_output_mode = (
@@ -472,7 +476,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         if 'result_type' in _deprecated_kwargs:  # pragma: no cover
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
-            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
+            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning, stacklevel=2)
             output_type = _deprecated_kwargs.pop('result_type')

         _utils.validate_empty_kwargs(_deprecated_kwargs)
@@ -640,7 +644,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         if 'result_type' in _deprecated_kwargs:  # pragma: no cover
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
-            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
+            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning, stacklevel=2)
             output_type = _deprecated_kwargs.pop('result_type')

         _utils.validate_empty_kwargs(_deprecated_kwargs)
@@ -879,7 +883,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         if 'result_type' in _deprecated_kwargs:  # pragma: no cover
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
-            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
+            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning, stacklevel=2)
             output_type = _deprecated_kwargs.pop('result_type')

         _utils.validate_empty_kwargs(_deprecated_kwargs)
@@ -997,7 +1001,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         if 'result_type' in _deprecated_kwargs:  # pragma: no cover
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
-            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
+            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning, stacklevel=2)
             output_type = _deprecated_kwargs.pop('result_type')

         _utils.validate_empty_kwargs(_deprecated_kwargs)
@@ -1336,7 +1340,11 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         return func

     @deprecated('`result_validator` is deprecated, use `output_validator` instead.')
-    def result_validator(self, func: Any, /) -> Any: ...
+    def result_validator(self, func: Any, /) -> Any:
+        warnings.warn(
+            '`result_validator` is deprecated, use `output_validator` instead.', DeprecationWarning, stacklevel=2
+        )
+        return self.output_validator(func)  # type: ignore

     @overload
     def tool(self, func: ToolFuncContext[AgentDepsT, ToolParams], /) -> ToolFuncContext[AgentDepsT, ToolParams]: ...
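Editor's note: the recurring `agent.py` change above adds `stacklevel` to each `warnings.warn` call so that the `DeprecationWarning` is attributed to the caller's code rather than to pydantic-ai internals. A minimal sketch of the effect; the function names are illustrative, not from the package:

```python
import warnings

def old_api() -> None:
    # stacklevel=1 (the default) would point the warning at this line inside the library;
    # stacklevel=2 walks one frame up the stack, pointing it at the caller of old_api()
    warnings.warn('`old_api` is deprecated, use `new_api` instead', DeprecationWarning, stacklevel=2)

old_api()  # with stacklevel=2, the warning is reported against this line
```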
--- a/pydantic_ai/direct.py
+++ b/pydantic_ai/direct.py
@@ -8,14 +8,29 @@ These methods are thin wrappers around [`Model`][pydantic_ai.models.Model] imple

 from __future__ import annotations as _annotations

+import queue
+import threading
+from collections.abc import Iterator
 from contextlib import AbstractAsyncContextManager
+from dataclasses import dataclass, field
+from datetime import datetime
+from types import TracebackType

+from pydantic_ai.usage import Usage
 from pydantic_graph._utils import get_event_loop as _get_event_loop

 from . import agent, messages, models, settings
-from .models import instrumented as instrumented_models
+from .models import StreamedResponse, instrumented as instrumented_models

-__all__ = 'model_request', 'model_request_sync', 'model_request_stream'
+__all__ = (
+    'model_request',
+    'model_request_sync',
+    'model_request_stream',
+    'model_request_stream_sync',
+    'StreamedResponseSync',
+)
+
+STREAM_INITIALIZATION_TIMEOUT = 30


 async def model_request(
@@ -144,7 +159,7 @@ def model_request_stream(

     async def main():
         messages = [ModelRequest.user_text_prompt('Who was Albert Einstein?')]  # (1)!
-        async with model_request_stream(
+        async with model_request_stream('openai:gpt-4.1-mini', messages) as stream:
             chunks = []
             async for chunk in stream:
                 chunks.append(chunk)
@@ -181,6 +196,63 @@
     )


+def model_request_stream_sync(
+    model: models.Model | models.KnownModelName | str,
+    messages: list[messages.ModelMessage],
+    *,
+    model_settings: settings.ModelSettings | None = None,
+    model_request_parameters: models.ModelRequestParameters | None = None,
+    instrument: instrumented_models.InstrumentationSettings | bool | None = None,
+) -> StreamedResponseSync:
+    """Make a streamed synchronous request to a model.
+
+    This is the synchronous version of [`model_request_stream`][pydantic_ai.direct.model_request_stream].
+    It uses threading to run the asynchronous stream in the background while providing a synchronous iterator interface.
+
+    ```py {title="model_request_stream_sync_example.py"}
+
+    from pydantic_ai.direct import model_request_stream_sync
+    from pydantic_ai.messages import ModelRequest
+
+    messages = [ModelRequest.user_text_prompt('Who was Albert Einstein?')]
+    with model_request_stream_sync('openai:gpt-4.1-mini', messages) as stream:
+        chunks = []
+        for chunk in stream:
+            chunks.append(chunk)
+        print(chunks)
+        '''
+        [
+            PartStartEvent(index=0, part=TextPart(content='Albert Einstein was ')),
+            PartDeltaEvent(
+                index=0, delta=TextPartDelta(content_delta='a German-born theoretical ')
+            ),
+            PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='physicist.')),
+        ]
+        '''
+    ```
+
+    Args:
+        model: The model to make a request to. We allow `str` here since the actual list of allowed models changes frequently.
+        messages: Messages to send to the model
+        model_settings: optional model settings
+        model_request_parameters: optional model request parameters
+        instrument: Whether to instrument the request with OpenTelemetry/Logfire, if `None` the value from
+            [`logfire.instrument_pydantic_ai`][logfire.Logfire.instrument_pydantic_ai] is used.
+
+    Returns:
+        A [sync stream response][pydantic_ai.direct.StreamedResponseSync] context manager.
+    """
+    async_stream_cm = model_request_stream(
+        model=model,
+        messages=messages,
+        model_settings=model_settings,
+        model_request_parameters=model_request_parameters,
+        instrument=instrument,
+    )
+
+    return StreamedResponseSync(async_stream_cm)
+
+
 def _prepare_model(
     model: models.Model | models.KnownModelName | str,
     instrument: instrumented_models.InstrumentationSettings | bool | None,
@@ -191,3 +263,119 @@ def _prepare_model(
         instrument = agent.Agent._instrument_default  # pyright: ignore[reportPrivateUsage]

     return instrumented_models.instrument_model(model_instance, instrument)
+
+
+@dataclass
+class StreamedResponseSync:
+    """Synchronous wrapper to async streaming responses by running the async producer in a background thread and providing a synchronous iterator.
+
+    This class must be used as a context manager with the `with` statement.
+    """
+
+    _async_stream_cm: AbstractAsyncContextManager[StreamedResponse]
+    _queue: queue.Queue[messages.ModelResponseStreamEvent | Exception | None] = field(
+        default_factory=queue.Queue, init=False
+    )
+    _thread: threading.Thread | None = field(default=None, init=False)
+    _stream_response: StreamedResponse | None = field(default=None, init=False)
+    _exception: Exception | None = field(default=None, init=False)
+    _context_entered: bool = field(default=False, init=False)
+    _stream_ready: threading.Event = field(default_factory=threading.Event, init=False)
+
+    def __enter__(self) -> StreamedResponseSync:
+        self._context_entered = True
+        self._start_producer()
+        return self
+
+    def __exit__(
+        self,
+        _exc_type: type[BaseException] | None,
+        _exc_val: BaseException | None,
+        _exc_tb: TracebackType | None,
+    ) -> None:
+        self._cleanup()
+
+    def __iter__(self) -> Iterator[messages.ModelResponseStreamEvent]:
+        """Stream the response as an iterable of [`ModelResponseStreamEvent`][pydantic_ai.messages.ModelResponseStreamEvent]s."""
+        self._check_context_manager_usage()
+
+        while True:
+            item = self._queue.get()
+            if item is None:  # End of stream
+                break
+            elif isinstance(item, Exception):
+                raise item
+            else:
+                yield item
+
+    def __repr__(self) -> str:
+        if self._stream_response:
+            return repr(self._stream_response)
+        else:
+            return f'{self.__class__.__name__}(context_entered={self._context_entered})'
+
+    __str__ = __repr__
+
+    def _check_context_manager_usage(self) -> None:
+        if not self._context_entered:
+            raise RuntimeError(
+                'StreamedResponseSync must be used as a context manager. '
+                'Use: `with model_request_stream_sync(...) as stream:`'
+            )
+
+    def _ensure_stream_ready(self) -> StreamedResponse:
+        self._check_context_manager_usage()
+
+        if self._stream_response is None:
+            # Wait for the background thread to signal that the stream is ready
+            if not self._stream_ready.wait(timeout=STREAM_INITIALIZATION_TIMEOUT):
+                raise RuntimeError('Stream failed to initialize within timeout')
+
+            if self._stream_response is None:  # pragma: no cover
+                raise RuntimeError('Stream failed to initialize')
+
+        return self._stream_response
+
+    def _start_producer(self):
+        self._thread = threading.Thread(target=self._async_producer, daemon=True)
+        self._thread.start()
+
+    def _async_producer(self):
+        async def _consume_async_stream():
+            try:
+                async with self._async_stream_cm as stream:
+                    self._stream_response = stream
+                    # Signal that the stream is ready
+                    self._stream_ready.set()
+                    async for event in stream:
+                        self._queue.put(event)
+            except Exception as e:
+                # Signal ready even on error so waiting threads don't hang
+                self._stream_ready.set()
+                self._queue.put(e)
+            finally:
+                self._queue.put(None)  # Signal end
+
+        _get_event_loop().run_until_complete(_consume_async_stream())
+
+    def _cleanup(self):
+        if self._thread and self._thread.is_alive():
+            self._thread.join()
+
+    def get(self) -> messages.ModelResponse:
+        """Build a ModelResponse from the data received from the stream so far."""
+        return self._ensure_stream_ready().get()
+
+    def usage(self) -> Usage:
+        """Get the usage of the response so far."""
+        return self._ensure_stream_ready().usage()
+
+    @property
+    def model_name(self) -> str:
+        """Get the model name of the response."""
+        return self._ensure_stream_ready().model_name
+
+    @property
+    def timestamp(self) -> datetime:
+        """Get the timestamp of the response."""
+        return self._ensure_stream_ready().timestamp
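Editor's note: a minimal sketch of how the new synchronous streaming API is meant to be consumed, following the docstrings above; it assumes `OPENAI_API_KEY` is set and reuses the model string from the example in the diff:

```python
from pydantic_ai.direct import model_request_stream_sync
from pydantic_ai.messages import ModelRequest

messages = [ModelRequest.user_text_prompt('Who was Albert Einstein?')]
with model_request_stream_sync('openai:gpt-4.1-mini', messages) as stream:
    # events are produced by a daemon thread and handed over via a queue
    for event in stream:
        print(event)
    # after iteration, the accumulated response and token usage are available
    response = stream.get()
    usage = stream.usage()
```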
--- /dev/null
+++ b/pydantic_ai/ext/aci.py
@@ -0,0 +1,66 @@
+# Checking whether aci-sdk is installed
+try:
+    from aci import ACI
+except ImportError as _import_error:
+    raise ImportError('Please install `aci-sdk` to use ACI.dev tools') from _import_error
+
+from typing import Any
+
+from aci import ACI
+
+from pydantic_ai import Tool
+
+
+def _clean_schema(schema):
+    if isinstance(schema, dict):
+        # Remove non-standard keys (e.g., 'visible')
+        return {k: _clean_schema(v) for k, v in schema.items() if k not in {'visible'}}
+    elif isinstance(schema, list):
+        return [_clean_schema(item) for item in schema]
+    else:
+        return schema
+
+
+def tool_from_aci(aci_function: str, linked_account_owner_id: str) -> Tool:
+    """Creates a Pydantic AI tool proxy from an ACI function.
+
+    Args:
+        aci_function: The ACI function to wrap.
+        linked_account_owner_id: The ACI user ID to execute the function on behalf of.
+
+    Returns:
+        A Pydantic AI tool that corresponds to the ACI.dev tool.
+    """
+    aci = ACI()
+    function_definition = aci.functions.get_definition(aci_function)
+    function_name = function_definition['function']['name']
+    function_description = function_definition['function']['description']
+    inputs = function_definition['function']['parameters']
+
+    json_schema = {
+        'additionalProperties': inputs.get('additionalProperties', False),
+        'properties': inputs.get('properties', {}),
+        'required': inputs.get('required', []),
+        # Default to 'object' if not specified
+        'type': inputs.get('type', 'object'),
+    }
+
+    # Clean the schema
+    json_schema = _clean_schema(json_schema)
+
+    def implementation(*args: Any, **kwargs: Any) -> str:
+        if args:
+            raise TypeError('Positional arguments are not allowed')
+        return aci.handle_function_call(
+            function_name,
+            kwargs,
+            linked_account_owner_id=linked_account_owner_id,
+            allowed_apps_only=True,
+        )
+
+    return Tool.from_schema(
+        function=implementation,
+        name=function_name,
+        description=function_description,
+        json_schema=json_schema,
+    )
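Editor's note: a minimal sketch of wiring the new ACI tool factory into an agent. The function name and account id are illustrative, and `ACI_API_KEY` is assumed to be available to the `ACI()` client:

```python
from pydantic_ai import Agent
from pydantic_ai.ext.aci import tool_from_aci

# tool_from_aci fetches the function definition from ACI.dev and exposes it
# to the agent as a schema-validated tool
web_search = tool_from_aci(
    'BRAVE_SEARCH__WEB_SEARCH',  # hypothetical ACI function name
    linked_account_owner_id='user-123',  # hypothetical linked account id
)

agent = Agent('openai:gpt-4.1-mini', tools=[web_search])
result = agent.run_sync('What is the capital of France?')
print(result.output)
```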
--- a/pydantic_ai/ext/langchain.py
+++ b/pydantic_ai/ext/langchain.py
@@ -27,13 +27,13 @@ __all__ = ('tool_from_langchain',)


 def tool_from_langchain(langchain_tool: LangChainTool) -> Tool:
-    """Creates a Pydantic tool proxy from a LangChain tool.
+    """Creates a Pydantic AI tool proxy from a LangChain tool.

     Args:
         langchain_tool: The LangChain tool to wrap.

     Returns:
-        A Pydantic tool that corresponds to the LangChain tool.
+        A Pydantic AI tool that corresponds to the LangChain tool.
     """
     function_name = langchain_tool.name
     function_description = langchain_tool.description
--- a/pydantic_ai/messages.py
+++ b/pydantic_ai/messages.py
@@ -99,6 +99,13 @@ class FileUrl(ABC):
     * If False, the URL is sent directly to the model and no download is performed.
     """

+    vendor_metadata: dict[str, Any] | None = None
+    """Vendor-specific metadata for the file.
+
+    Supported by:
+    - `GoogleModel`: `VideoUrl.vendor_metadata` is used as `video_metadata`: https://ai.google.dev/gemini-api/docs/video-understanding#customize-video-processing
+    """
+
     @property
     @abstractmethod
     def media_type(self) -> str:
@@ -263,6 +270,13 @@ class BinaryContent:
     media_type: AudioMediaType | ImageMediaType | DocumentMediaType | str
     """The media type of the binary data."""

+    vendor_metadata: dict[str, Any] | None = None
+    """Vendor-specific metadata for the file.
+
+    Supported by:
+    - `GoogleModel`: `BinaryContent.vendor_metadata` is used as `video_metadata`: https://ai.google.dev/gemini-api/docs/video-understanding#customize-video-processing
+    """
+
     kind: Literal['binary'] = 'binary'
     """Type identifier, this is available on all parts as a discriminator."""

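Editor's note: a minimal sketch of the new `vendor_metadata` field in use, passing Google video options through a `VideoUrl`. The clipping keys follow the linked Gemini video-understanding docs and should be treated as assumptions here, as should the model name:

```python
from pydantic_ai import Agent
from pydantic_ai.messages import VideoUrl

video = VideoUrl(
    url='https://youtu.be/lCdaVNyHtjU',  # hypothetical YouTube URL
    vendor_metadata={
        # forwarded as `video_metadata` by GoogleModel; keys per the Gemini docs
        'start_offset': '40s',
        'end_offset': '80s',
    },
)

agent = Agent('google-gla:gemini-2.5-flash')
result = agent.run_sync(['What happens in this clip?', video])
```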
--- a/pydantic_ai/models/__init__.py
+++ b/pydantic_ai/models/__init__.py
@@ -569,7 +569,17 @@ def infer_model(model: Model | KnownModelName | str) -> Model:
         from .cohere import CohereModel

         return CohereModel(model_name, provider=provider)
-    elif provider in ('openai', 'deepseek', 'azure', 'openrouter', 'grok', 'fireworks', 'together', 'heroku'):
+    elif provider in (
+        'openai',
+        'deepseek',
+        'azure',
+        'openrouter',
+        'grok',
+        'fireworks',
+        'together',
+        'heroku',
+        'github',
+    ):
         from .openai import OpenAIModel

         return OpenAIModel(model_name, provider=provider)
--- a/pydantic_ai/models/anthropic.py
+++ b/pydantic_ai/models/anthropic.py
@@ -90,10 +90,9 @@ See [the Anthropic docs](https://docs.anthropic.com/en/docs/about-claude/models)


 class AnthropicModelSettings(ModelSettings, total=False):
-    """Settings used for an Anthropic model request.
+    """Settings used for an Anthropic model request."""

-    ALL FIELDS MUST BE `anthropic_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
-    """
+    # ALL FIELDS MUST BE `anthropic_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.

     anthropic_metadata: BetaMetadataParam
     """An object describing metadata about the request.
--- a/pydantic_ai/models/bedrock.py
+++ b/pydantic_ai/models/bedrock.py
@@ -133,12 +133,12 @@ T = typing.TypeVar('T')
 class BedrockModelSettings(ModelSettings, total=False):
     """Settings for Bedrock models.

-    ALL FIELDS MUST BE `bedrock_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
-
     See [the Bedrock Converse API docs](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_Converse.html#API_runtime_Converse_RequestSyntax) for a full list.
     See [the boto3 implementation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/bedrock-runtime/client/converse.html) of the Bedrock Converse API.
     """

+    # ALL FIELDS MUST BE `bedrock_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
+
     bedrock_guardrail_config: GuardrailConfigurationTypeDef
     """Content moderation and safety settings for Bedrock API requests.

--- a/pydantic_ai/models/cohere.py
+++ b/pydantic_ai/models/cohere.py
@@ -83,10 +83,9 @@ See [Cohere's docs](https://docs.cohere.com/v2/docs/models) for a list of all av


 class CohereModelSettings(ModelSettings, total=False):
-    """Settings used for a Cohere model request.
+    """Settings used for a Cohere model request."""

-    ALL FIELDS MUST BE `cohere_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
-    """
+    # ALL FIELDS MUST BE `cohere_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.

     # This class is a placeholder for any future cohere-specific settings

--- a/pydantic_ai/models/gemini.py
+++ b/pydantic_ai/models/gemini.py
@@ -74,10 +74,9 @@ See [the Gemini API docs](https://ai.google.dev/gemini-api/docs/models/gemini#mo


 class GeminiModelSettings(ModelSettings, total=False):
-    """Settings used for a Gemini model request.
+    """Settings used for a Gemini model request."""

-    ALL FIELDS MUST BE `gemini_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
-    """
+    # ALL FIELDS MUST BE `gemini_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.

     gemini_safety_settings: list[GeminiSafetySettings]
     """Safety settings options for Gemini model request."""
--- a/pydantic_ai/models/google.py
+++ b/pydantic_ai/models/google.py
@@ -55,6 +55,7 @@ try:
     GenerateContentConfigDict,
     GenerateContentResponse,
     HttpOptionsDict,
+    MediaResolution,
     Part,
     PartDict,
     SafetySettingDict,
@@ -98,10 +99,9 @@ See [the Gemini API docs](https://ai.google.dev/gemini-api/docs/models/gemini#mo


 class GoogleModelSettings(ModelSettings, total=False):
-    """Settings used for a Gemini model request.
+    """Settings used for a Gemini model request."""

-    ALL FIELDS MUST BE `gemini_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
-    """
+    # ALL FIELDS MUST BE `gemini_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.

     google_safety_settings: list[SafetySettingDict]
     """The safety settings to use for the model.
@@ -121,6 +121,12 @@ class GoogleModelSettings(ModelSettings, total=False):
     See the [Gemini API docs](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/add-labels-to-api-calls) for use cases and limitations.
     """

+    google_video_resolution: MediaResolution
+    """The video resolution to use for the model.
+
+    See <https://ai.google.dev/api/generate-content#MediaResolution> for more information.
+    """
+

 @dataclass(init=False)
 class GoogleModel(Model):
@@ -292,6 +298,7 @@ class GoogleModel(Model):
             safety_settings=model_settings.get('google_safety_settings'),
             thinking_config=model_settings.get('google_thinking_config'),
             labels=model_settings.get('google_labels'),
+            media_resolution=model_settings.get('google_video_resolution'),
             tools=cast(ToolListUnionDict, tools),
             tool_config=tool_config,
             response_mime_type=response_mime_type,
@@ -399,9 +406,15 @@
             elif isinstance(item, BinaryContent):
                 # NOTE: The type from Google GenAI is incorrect, it should be `str`, not `bytes`.
                 base64_encoded = base64.b64encode(item.data).decode('utf-8')
-                content.append({'inline_data': {'data': base64_encoded, 'mime_type': item.media_type}})  # type: ignore
+                inline_data_dict = {'inline_data': {'data': base64_encoded, 'mime_type': item.media_type}}
+                if item.vendor_metadata:
+                    inline_data_dict['video_metadata'] = item.vendor_metadata
+                content.append(inline_data_dict)  # type: ignore
             elif isinstance(item, VideoUrl) and item.is_youtube:
-                content.append({'file_data': {'file_uri': item.url, 'mime_type': item.media_type}})  # type: ignore
+                file_data_dict = {'file_data': {'file_uri': item.url, 'mime_type': item.media_type}}
+                if item.vendor_metadata:
+                    file_data_dict['video_metadata'] = item.vendor_metadata
+                content.append(file_data_dict)  # type: ignore
             elif isinstance(item, FileUrl):
                 if self.system == 'google-gla' or item.force_download:
                     downloaded_item = await download_item(item, data_format='base64')
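Editor's note: a minimal sketch of the new `google_video_resolution` setting, which the hunk above forwards to `media_resolution` in the request config. `MediaResolution` is the `google-genai` enum imported at the top of the file; the specific member used here is an assumption:

```python
from google.genai.types import MediaResolution

from pydantic_ai.models.google import GoogleModelSettings

# GoogleModelSettings is a TypedDict, so this is just a dict with checked keys;
# the value reaches the request via model_settings.get('google_video_resolution')
settings = GoogleModelSettings(
    google_video_resolution=MediaResolution.MEDIA_RESOLUTION_LOW,  # assumed enum member
)
```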
--- a/pydantic_ai/models/groq.py
+++ b/pydantic_ai/models/groq.py
@@ -93,10 +93,9 @@ See <https://console.groq.com/docs/models> for an up to date list of models


 class GroqModelSettings(ModelSettings, total=False):
-    """Settings used for a Groq model request.
+    """Settings used for a Groq model request."""

-    ALL FIELDS MUST BE `groq_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
-    """
+    # ALL FIELDS MUST BE `groq_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.

     groq_reasoning_format: Literal['hidden', 'raw', 'parsed']

--- a/pydantic_ai/models/mcp_sampling.py
+++ b/pydantic_ai/models/mcp_sampling.py
@@ -16,10 +16,9 @@ if TYPE_CHECKING:


 class MCPSamplingModelSettings(ModelSettings, total=False):
-    """Settings used for an MCP Sampling model request.
+    """Settings used for an MCP Sampling model request."""

-    ALL FIELDS MUST BE `mcp_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
-    """
+    # ALL FIELDS MUST BE `mcp_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.

     mcp_model_preferences: ModelPreferences
     """Model preferences to use for MCP Sampling."""
--- a/pydantic_ai/models/mistral.py
+++ b/pydantic_ai/models/mistral.py
@@ -96,10 +96,9 @@ Since [the Mistral docs](https://docs.mistral.ai/getting-started/models/models_o


 class MistralModelSettings(ModelSettings, total=False):
-    """Settings used for a Mistral model request.
+    """Settings used for a Mistral model request."""

-    ALL FIELDS MUST BE `mistral_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
-    """
+    # ALL FIELDS MUST BE `mistral_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.

     # This class is a placeholder for any future mistral-specific settings

--- a/pydantic_ai/models/openai.py
+++ b/pydantic_ai/models/openai.py
@@ -96,10 +96,9 @@ OpenAISystemPromptRole = Literal['system', 'developer', 'user']


 class OpenAIModelSettings(ModelSettings, total=False):
-    """Settings used for an OpenAI model request.
+    """Settings used for an OpenAI model request."""

-    ALL FIELDS MUST BE `openai_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
-    """
+    # ALL FIELDS MUST BE `openai_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.

     openai_reasoning_effort: ReasoningEffort
     """Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
@@ -190,7 +189,9 @@ class OpenAIModel(Model):
         self,
         model_name: OpenAIModelName,
         *,
-        provider: Literal['openai', 'deepseek', 'azure', 'openrouter', 'grok', 'fireworks', 'together', 'heroku']
+        provider: Literal[
+            'openai', 'deepseek', 'azure', 'openrouter', 'grok', 'fireworks', 'together', 'heroku', 'github'
+        ]
         | Provider[AsyncOpenAI] = 'openai',
         profile: ModelProfileSpec | None = None,
         system_prompt_role: OpenAISystemPromptRole | None = None,
--- a/pydantic_ai/providers/__init__.py
+++ b/pydantic_ai/providers/__init__.py
@@ -111,6 +111,10 @@ def infer_provider_class(provider: str) -> type[Provider[Any]]:  # noqa: C901
         from .heroku import HerokuProvider

         return HerokuProvider
+    elif provider == 'github':
+        from .github import GitHubProvider
+
+        return GitHubProvider
     else:  # pragma: no cover
         raise ValueError(f'Unknown provider: {provider}')

--- /dev/null
+++ b/pydantic_ai/providers/github.py
@@ -0,0 +1,112 @@
+from __future__ import annotations as _annotations
+
+import os
+from typing import overload
+
+from httpx import AsyncClient as AsyncHTTPClient
+
+from pydantic_ai.exceptions import UserError
+from pydantic_ai.models import cached_async_http_client
+from pydantic_ai.profiles import ModelProfile
+from pydantic_ai.profiles.cohere import cohere_model_profile
+from pydantic_ai.profiles.deepseek import deepseek_model_profile
+from pydantic_ai.profiles.grok import grok_model_profile
+from pydantic_ai.profiles.meta import meta_model_profile
+from pydantic_ai.profiles.mistral import mistral_model_profile
+from pydantic_ai.profiles.openai import OpenAIJsonSchemaTransformer, OpenAIModelProfile, openai_model_profile
+from pydantic_ai.providers import Provider
+
+try:
+    from openai import AsyncOpenAI
+except ImportError as _import_error:  # pragma: no cover
+    raise ImportError(
+        'Please install the `openai` package to use the GitHub Models provider, '
+        'you can use the `openai` optional group — `pip install "pydantic-ai-slim[openai]"`'
+    ) from _import_error
+
+
+class GitHubProvider(Provider[AsyncOpenAI]):
+    """Provider for GitHub Models API.
+
+    GitHub Models provides access to various AI models through an OpenAI-compatible API.
+    See <https://docs.github.com/en/github-models> for more information.
+    """
+
+    @property
+    def name(self) -> str:
+        return 'github'
+
+    @property
+    def base_url(self) -> str:
+        return 'https://models.github.ai/inference'
+
+    @property
+    def client(self) -> AsyncOpenAI:
+        return self._client
+
+    def model_profile(self, model_name: str) -> ModelProfile | None:
+        provider_to_profile = {
+            'xai': grok_model_profile,
+            'meta': meta_model_profile,
+            'microsoft': openai_model_profile,
+            'mistral-ai': mistral_model_profile,
+            'cohere': cohere_model_profile,
+            'deepseek': deepseek_model_profile,
+        }
+
+        profile = None
+
+        # If the model name does not contain a provider prefix, we assume it's an OpenAI model
+        if '/' not in model_name:
+            return openai_model_profile(model_name)
+
+        provider, model_name = model_name.lower().split('/', 1)
+        if provider in provider_to_profile:
+            model_name, *_ = model_name.split(':', 1)  # drop tags
+            profile = provider_to_profile[provider](model_name)
+
+        # As GitHubProvider is always used with OpenAIModel, which used to unconditionally use OpenAIJsonSchemaTransformer,
+        # we need to maintain that behavior unless json_schema_transformer is set explicitly
+        return OpenAIModelProfile(json_schema_transformer=OpenAIJsonSchemaTransformer).update(profile)
+
+    @overload
+    def __init__(self) -> None: ...
+
+    @overload
+    def __init__(self, *, api_key: str) -> None: ...
+
+    @overload
+    def __init__(self, *, api_key: str, http_client: AsyncHTTPClient) -> None: ...
+
+    @overload
+    def __init__(self, *, openai_client: AsyncOpenAI | None = None) -> None: ...
+
+    def __init__(
+        self,
+        *,
+        api_key: str | None = None,
+        openai_client: AsyncOpenAI | None = None,
+        http_client: AsyncHTTPClient | None = None,
+    ) -> None:
+        """Create a new GitHub Models provider.
+
+        Args:
+            api_key: The GitHub token to use for authentication. If not provided, the `GITHUB_API_KEY`
+                environment variable will be used if available.
+            openai_client: An existing `AsyncOpenAI` client to use. If provided, `api_key` and `http_client` must be `None`.
+            http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
+        """
+        api_key = api_key or os.getenv('GITHUB_API_KEY')
+        if not api_key and openai_client is None:
+            raise UserError(
+                'Set the `GITHUB_API_KEY` environment variable or pass it via `GitHubProvider(api_key=...)`'
+                ' to use the GitHub Models provider.'
+            )
+
+        if openai_client is not None:
+            self._client = openai_client
+        elif http_client is not None:
+            self._client = AsyncOpenAI(base_url=self.base_url, api_key=api_key, http_client=http_client)
+        else:
+            http_client = cached_async_http_client(provider='github')
+            self._client = AsyncOpenAI(base_url=self.base_url, api_key=api_key, http_client=http_client)
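Editor's note: a minimal sketch of the new GitHub Models provider in use; the `xai/grok-3-mini` model id is illustrative of the `{publisher}/{model}` naming on GitHub Models:

```python
import os

from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIModel
from pydantic_ai.providers.github import GitHubProvider

# GITHUB_API_KEY is read from the environment if api_key is omitted
provider = GitHubProvider(api_key=os.environ['GITHUB_API_KEY'])
model = OpenAIModel('xai/grok-3-mini', provider=provider)  # illustrative model id
agent = Agent(model)

# the string form registered in models/__init__.py works as well:
# agent = Agent('github:xai/grok-3-mini')
```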
--- a/pydantic_ai/result.py
+++ b/pydantic_ai/result.py
@@ -59,7 +59,12 @@ class AgentStream(Generic[AgentDepsT, OutputDataT]):
         """Asynchronously stream the (validated) agent outputs."""
         async for response in self.stream_responses(debounce_by=debounce_by):
             if self._final_result_event is not None:
-                yield await self._validate_response(response, self._final_result_event.tool_name, allow_partial=True)
+                try:
+                    yield await self._validate_response(
+                        response, self._final_result_event.tool_name, allow_partial=True
+                    )
+                except ValidationError:
+                    pass
         if self._final_result_event is not None:  # pragma: no branch
             yield await self._validate_response(
                 self._raw_stream_response.get(), self._final_result_event.tool_name, allow_partial=False
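Editor's note: this hunk makes streamed output validation tolerant of incomplete chunks: while the output is still streaming, a partially received payload may not yet satisfy the output schema, so `ValidationError` is swallowed for intermediate responses, and only the final response (validated with `allow_partial=False`) is checked strictly. A small illustration of why a mid-stream payload can fail validation; the model and data are made up:

```python
from pydantic import BaseModel, ValidationError

class City(BaseModel):
    name: str
    population: int

try:
    # mid-stream, 'population' may simply not have arrived yet
    City.model_validate({'name': 'Paris'})
except ValidationError:
    pass  # skip this partial chunk; the complete final response is still validated
```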
@@ -546,6 +551,7 @@ def coalesce_deprecated_return_content(
         warnings.warn(
             '`result_tool_return_content` is deprecated, use `output_tool_return_content` instead.',
             DeprecationWarning,
+            stacklevel=3,
         )
         return result_tool_return_content
     return output_tool_return_content
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -64,7 +64,7 @@ logfire = ["logfire>=3.11.0"]
 openai = ["openai>=1.76.0"]
 cohere = ["cohere>=5.13.11; platform_system != 'Emscripten'"]
 vertexai = ["google-auth>=2.36.0", "requests>=2.32.2"]
-google = ["google-genai>=1.
+google = ["google-genai>=1.24.0"]
 anthropic = ["anthropic>=0.52.0"]
 groq = ["groq>=0.19.0"]
 mistral = ["mistralai>=1.2.5"]
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|