pydantic-ai-slim 0.7.5__py3-none-any.whl → 0.8.0__py3-none-any.whl
This diff shows the content of publicly available package versions as released to their public registries. It is provided for informational purposes only and reflects the changes between the two versions as published.
Potentially problematic release: this version of pydantic-ai-slim has been flagged as potentially problematic.
- pydantic_ai/_cli.py +2 -1
- pydantic_ai/ag_ui.py +2 -2
- pydantic_ai/agent/__init__.py +19 -12
- pydantic_ai/agent/abstract.py +31 -18
- pydantic_ai/direct.py +5 -3
- pydantic_ai/durable_exec/temporal/_function_toolset.py +9 -2
- pydantic_ai/mcp.py +48 -71
- pydantic_ai/messages.py +46 -10
- pydantic_ai/models/__init__.py +24 -14
- pydantic_ai/models/anthropic.py +2 -2
- pydantic_ai/models/bedrock.py +1 -1
- pydantic_ai/models/openai.py +74 -38
- pydantic_ai/profiles/__init__.py +1 -1
- pydantic_ai/profiles/harmony.py +13 -0
- pydantic_ai/profiles/openai.py +6 -1
- pydantic_ai/profiles/qwen.py +8 -0
- pydantic_ai/providers/__init__.py +5 -1
- pydantic_ai/providers/azure.py +1 -1
- pydantic_ai/providers/cerebras.py +96 -0
- pydantic_ai/providers/cohere.py +2 -2
- pydantic_ai/providers/deepseek.py +4 -4
- pydantic_ai/providers/fireworks.py +3 -3
- pydantic_ai/providers/github.py +4 -4
- pydantic_ai/providers/grok.py +3 -3
- pydantic_ai/providers/groq.py +3 -3
- pydantic_ai/providers/heroku.py +3 -3
- pydantic_ai/providers/mistral.py +3 -3
- pydantic_ai/providers/moonshotai.py +3 -6
- pydantic_ai/providers/ollama.py +1 -1
- pydantic_ai/providers/openrouter.py +4 -4
- pydantic_ai/providers/together.py +3 -3
- pydantic_ai/providers/vercel.py +4 -4
- pydantic_ai/result.py +5 -5
- pydantic_ai/retries.py +154 -42
- {pydantic_ai_slim-0.7.5.dist-info → pydantic_ai_slim-0.8.0.dist-info}/METADATA +4 -4
- {pydantic_ai_slim-0.7.5.dist-info → pydantic_ai_slim-0.8.0.dist-info}/RECORD +39 -37
- {pydantic_ai_slim-0.7.5.dist-info → pydantic_ai_slim-0.8.0.dist-info}/WHEEL +0 -0
- {pydantic_ai_slim-0.7.5.dist-info → pydantic_ai_slim-0.8.0.dist-info}/entry_points.txt +0 -0
- {pydantic_ai_slim-0.7.5.dist-info → pydantic_ai_slim-0.8.0.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/providers/mistral.py
CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations as _annotations
 import os
 from typing import overload

-from httpx import AsyncClient
+import httpx

 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
@@ -42,7 +42,7 @@ class MistralProvider(Provider[Mistral]):
     def __init__(self, *, mistral_client: Mistral | None = None) -> None: ...

     @overload
-    def __init__(self, *, api_key: str | None = None, http_client: AsyncClient | None = None) -> None: ...
+    def __init__(self, *, api_key: str | None = None, http_client: httpx.AsyncClient | None = None) -> None: ...

     def __init__(
         self,
@@ -50,7 +50,7 @@ class MistralProvider(Provider[Mistral]):
         api_key: str | None = None,
         mistral_client: Mistral | None = None,
         base_url: str | None = None,
-        http_client: AsyncClient | None = None,
+        http_client: httpx.AsyncClient | None = None,
     ) -> None:
         """Create a new Mistral provider.

pydantic_ai/providers/moonshotai.py
CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations as _annotations
 import os
 from typing import Literal, overload

-from httpx import AsyncClient
+import httpx
 from openai import AsyncOpenAI

 from pydantic_ai.exceptions import UserError
@@ -59,9 +59,6 @@ class MoonshotAIProvider(Provider[AsyncOpenAI]):
             supports_json_object_output=True,
         ).update(profile)

-    # ---------------------------------------------------------------------
-    # Construction helpers
-    # ---------------------------------------------------------------------
     @overload
     def __init__(self) -> None: ...

@@ -69,7 +66,7 @@ class MoonshotAIProvider(Provider[AsyncOpenAI]):
     def __init__(self, *, api_key: str) -> None: ...

     @overload
-    def __init__(self, *, api_key: str, http_client: AsyncClient) -> None: ...
+    def __init__(self, *, api_key: str, http_client: httpx.AsyncClient) -> None: ...

     @overload
     def __init__(self, *, openai_client: AsyncOpenAI | None = None) -> None: ...
@@ -79,7 +76,7 @@ class MoonshotAIProvider(Provider[AsyncOpenAI]):
         *,
         api_key: str | None = None,
         openai_client: AsyncOpenAI | None = None,
-        http_client: AsyncClient | None = None,
+        http_client: httpx.AsyncClient | None = None,
     ) -> None:
         api_key = api_key or os.getenv('MOONSHOTAI_API_KEY')
         if not api_key and openai_client is None:
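
Across these provider modules the 0.8.0 change is mechanical: the old `from httpx import AsyncClient` import becomes `import httpx`, and every `http_client` parameter is re-annotated as `httpx.AsyncClient`. A minimal call-site sketch under the new annotations (the API key and timeout are placeholder values):

import httpx

from pydantic_ai.providers.mistral import MistralProvider

# Placeholder configuration; any httpx.AsyncClient can be passed.
http_client = httpx.AsyncClient(timeout=30)
provider = MistralProvider(api_key='your-api-key', http_client=http_client)

Because only the annotations changed, existing callers that already pass an `httpx.AsyncClient` behave the same.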
pydantic_ai/providers/ollama.py
CHANGED
@@ -58,7 +58,7 @@ class OllamaProvider(Provider[AsyncOpenAI]):
         if model_name.startswith(prefix):
             profile = profile_func(model_name)

-            # As OllamaProvider is always used with
+            # As OllamaProvider is always used with OpenAIChatModel, which used to unconditionally use OpenAIJsonSchemaTransformer,
             # we need to maintain that behavior unless json_schema_transformer is set explicitly
             return OpenAIModelProfile(json_schema_transformer=OpenAIJsonSchemaTransformer).update(profile)

pydantic_ai/providers/openrouter.py
CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations as _annotations
 import os
 from typing import overload

-from httpx import AsyncClient
+import httpx
 from openai import AsyncOpenAI

 from pydantic_ai.exceptions import UserError
@@ -68,7 +68,7 @@ class OpenRouterProvider(Provider[AsyncOpenAI]):
         model_name, *_ = model_name.split(':', 1)  # drop tags
         profile = provider_to_profile[provider](model_name)

-        # As OpenRouterProvider is always used with
+        # As OpenRouterProvider is always used with OpenAIChatModel, which used to unconditionally use OpenAIJsonSchemaTransformer,
         # we need to maintain that behavior unless json_schema_transformer is set explicitly
         return OpenAIModelProfile(json_schema_transformer=OpenAIJsonSchemaTransformer).update(profile)

@@ -79,7 +79,7 @@ class OpenRouterProvider(Provider[AsyncOpenAI]):
     def __init__(self, *, api_key: str) -> None: ...

     @overload
-    def __init__(self, *, api_key: str, http_client: AsyncClient) -> None: ...
+    def __init__(self, *, api_key: str, http_client: httpx.AsyncClient) -> None: ...

     @overload
     def __init__(self, *, openai_client: AsyncOpenAI | None = None) -> None: ...
@@ -89,7 +89,7 @@ class OpenRouterProvider(Provider[AsyncOpenAI]):
         *,
         api_key: str | None = None,
         openai_client: AsyncOpenAI | None = None,
-        http_client: AsyncClient | None = None,
+        http_client: httpx.AsyncClient | None = None,
     ) -> None:
         api_key = api_key or os.getenv('OPENROUTER_API_KEY')
         if not api_key and openai_client is None:

pydantic_ai/providers/together.py
CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations as _annotations
 import os
 from typing import overload

-from httpx import AsyncClient
+import httpx
 from openai import AsyncOpenAI

 from pydantic_ai.exceptions import UserError
@@ -68,7 +68,7 @@ class TogetherProvider(Provider[AsyncOpenAI]):
     def __init__(self, *, api_key: str) -> None: ...

     @overload
-    def __init__(self, *, api_key: str, http_client: AsyncClient) -> None: ...
+    def __init__(self, *, api_key: str, http_client: httpx.AsyncClient) -> None: ...

     @overload
     def __init__(self, *, openai_client: AsyncOpenAI | None = None) -> None: ...
@@ -78,7 +78,7 @@ class TogetherProvider(Provider[AsyncOpenAI]):
         *,
         api_key: str | None = None,
         openai_client: AsyncOpenAI | None = None,
-        http_client: AsyncClient | None = None,
+        http_client: httpx.AsyncClient | None = None,
     ) -> None:
         api_key = api_key or os.getenv('TOGETHER_API_KEY')
         if not api_key and openai_client is None:
pydantic_ai/providers/vercel.py
CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations as _annotations
 import os
 from typing import overload

-from httpx import AsyncClient
+import httpx

 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import cached_async_http_client
@@ -64,7 +64,7 @@ class VercelProvider(Provider[AsyncOpenAI]):
         if provider in provider_to_profile:
             profile = provider_to_profile[provider](model_name)

-            # As VercelProvider is always used with
+            # As VercelProvider is always used with OpenAIChatModel, which used to unconditionally use OpenAIJsonSchemaTransformer,
             # we need to maintain that behavior unless json_schema_transformer is set explicitly
             return OpenAIModelProfile(
                 json_schema_transformer=OpenAIJsonSchemaTransformer,
@@ -77,7 +77,7 @@ class VercelProvider(Provider[AsyncOpenAI]):
     def __init__(self, *, api_key: str) -> None: ...

     @overload
-    def __init__(self, *, api_key: str, http_client: AsyncClient) -> None: ...
+    def __init__(self, *, api_key: str, http_client: httpx.AsyncClient) -> None: ...

     @overload
     def __init__(self, *, openai_client: AsyncOpenAI | None = None) -> None: ...
@@ -87,7 +87,7 @@ class VercelProvider(Provider[AsyncOpenAI]):
         *,
         api_key: str | None = None,
         openai_client: AsyncOpenAI | None = None,
-        http_client: AsyncClient | None = None,
+        http_client: httpx.AsyncClient | None = None,
     ) -> None:
         # Support Vercel AI Gateway's standard environment variables
         api_key = api_key or os.getenv('VERCEL_AI_GATEWAY_API_KEY') or os.getenv('VERCEL_OIDC_TOKEN')
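
The comment completed in these hunks relies on the profile-merging idiom shared by all four providers: build a base `OpenAIModelProfile` carrying the transformer that `OpenAIChatModel` used to apply unconditionally, then call `.update(profile)` so anything the model-specific profile sets explicitly still wins. A rough sketch of that intent, assuming `update` merges the argument's explicitly-set fields over the base profile:

from pydantic_ai.profiles.openai import OpenAIJsonSchemaTransformer, OpenAIModelProfile

# Provider-side default: keep the transformer OpenAIChatModel used to apply unconditionally.
base = OpenAIModelProfile(json_schema_transformer=OpenAIJsonSchemaTransformer)

# Illustrative model-specific profile; fields set explicitly here take precedence on merge.
model_profile = OpenAIModelProfile(supports_json_object_output=True)

merged = base.update(model_profile)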
pydantic_ai/result.py
CHANGED
@@ -22,7 +22,7 @@ from ._output import (
     ToolOutputSchema,
 )
 from ._run_context import AgentDepsT, RunContext
-from .messages import
+from .messages import ModelResponseStreamEvent
 from .output import (
     OutputDataT,
     ToolOutput,
@@ -51,7 +51,7 @@ class AgentStream(Generic[AgentDepsT, OutputDataT]):
     _usage_limits: UsageLimits | None
     _tool_manager: ToolManager[AgentDepsT]

-    _agent_stream_iterator: AsyncIterator[
+    _agent_stream_iterator: AsyncIterator[ModelResponseStreamEvent] | None = field(default=None, init=False)
     _initial_run_ctx_usage: RunUsage = field(init=False)

     def __post_init__(self):
@@ -221,8 +221,8 @@ class AgentStream(Generic[AgentDepsT, OutputDataT]):
            deltas.append(text)
            yield ''.join(deltas)

-    def __aiter__(self) -> AsyncIterator[
-        """Stream [`
+    def __aiter__(self) -> AsyncIterator[ModelResponseStreamEvent]:
+        """Stream [`ModelResponseStreamEvent`][pydantic_ai.messages.ModelResponseStreamEvent]s."""
         if self._agent_stream_iterator is None:
             self._agent_stream_iterator = _get_usage_checking_stream_response(
                 self._raw_stream_response, self._usage_limits, self.usage
@@ -426,7 +426,7 @@ def _get_usage_checking_stream_response(
     stream_response: models.StreamedResponse,
     limits: UsageLimits | None,
     get_usage: Callable[[], RunUsage],
-) -> AsyncIterator[
+) -> AsyncIterator[ModelResponseStreamEvent]:
     if limits is not None and limits.has_token_limits():

         async def _usage_checking_iterator():
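
The practical upshot of these result.py hunks is that iterating an `AgentStream` now yields `ModelResponseStreamEvent`s. A minimal consumption sketch following the documented agent-iteration pattern (the model name and prompt are placeholders):

import asyncio

from pydantic_ai import Agent

agent = Agent('openai:gpt-4o')

async def main() -> None:
    async with agent.iter('What is the capital of France?') as run:
        async for node in run:
            if Agent.is_model_request_node(node):
                # node.stream() yields the AgentStream shown in the diff; iterating
                # it produces ModelResponseStreamEvent items.
                async with node.stream(run.ctx) as stream:
                    async for event in stream:
                        print(type(event).__name__)

asyncio.run(main())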
pydantic_ai/retries.py
CHANGED
@@ -13,25 +13,103 @@ The module includes:

 from __future__ import annotations

-from httpx import
+from httpx import (
+    AsyncBaseTransport,
+    AsyncHTTPTransport,
+    BaseTransport,
+    HTTPStatusError,
+    HTTPTransport,
+    Request,
+    Response,
+)

 try:
-    from tenacity import AsyncRetrying, Retrying
+    from tenacity import AsyncRetrying, RetryCallState, RetryError, Retrying, retry, wait_exponential
 except ImportError as _import_error:
     raise ImportError(
         'Please install `tenacity` to use the retries utilities, '
         'you can use the `retries` optional group — `pip install "pydantic-ai-slim[retries]"`'
     ) from _import_error

-
-__all__ = ['TenacityTransport', 'AsyncTenacityTransport', 'wait_retry_after']
-
+from collections.abc import Awaitable
 from datetime import datetime, timezone
 from email.utils import parsedate_to_datetime
-from typing import Callable, cast
+from typing import TYPE_CHECKING, Any, Callable, NoReturn, cast
+
+from typing_extensions import TypedDict
+
+if TYPE_CHECKING:
+    from tenacity.asyncio.retry import RetryBaseT
+    from tenacity.retry import RetryBaseT as SyncRetryBaseT
+    from tenacity.stop import StopBaseT
+    from tenacity.wait import WaitBaseT
+
+__all__ = ['RetryConfig', 'TenacityTransport', 'AsyncTenacityTransport', 'wait_retry_after']
+
+
+class RetryConfig(TypedDict, total=False):
+    """The configuration for tenacity-based retrying.
+
+    These are precisely the arguments to the tenacity `retry` decorator, and they are generally
+    used internally by passing them to that decorator via `@retry(**config)` or similar.
+
+    All fields are optional, and if not provided, the default values from the `tenacity.retry` decorator will be used.
+    """
+
+    sleep: Callable[[int | float], None | Awaitable[None]]
+    """A sleep strategy to use for sleeping between retries.
+
+    Tenacity's default for this argument is `tenacity.nap.sleep`."""
+
+    stop: StopBaseT
+    """
+    A stop strategy to determine when to stop retrying.
+
+    Tenacity's default for this argument is `tenacity.stop.stop_never`."""
+
+    wait: WaitBaseT
+    """
+    A wait strategy to determine how long to wait between retries.
+
+    Tenacity's default for this argument is `tenacity.wait.wait_none`."""
+
+    retry: SyncRetryBaseT | RetryBaseT
+    """A retry strategy to determine which exceptions should trigger a retry.
+
+    Tenacity's default for this argument is `tenacity.retry.retry_if_exception_type()`."""
+
+    before: Callable[[RetryCallState], None | Awaitable[None]]
+    """
+    A callable that is called before each retry attempt.
+
+    Tenacity's default for this argument is `tenacity.before.before_nothing`."""
+
+    after: Callable[[RetryCallState], None | Awaitable[None]]
+    """
+    A callable that is called after each retry attempt.
+
+    Tenacity's default for this argument is `tenacity.after.after_nothing`."""
+
+    before_sleep: Callable[[RetryCallState], None | Awaitable[None]] | None
+    """
+    An optional callable that is called before sleeping between retries.
+
+    Tenacity's default for this argument is `None`."""
+
+    reraise: bool
+    """Whether to reraise the last exception if the retry attempts are exhausted, or raise a RetryError instead.
+
+    Tenacity's default for this argument is `False`."""
+
+    retry_error_cls: type[RetryError]
+    """The exception class to raise when the retry attempts are exhausted and `reraise` is False.
+
+    Tenacity's default for this argument is `tenacity.RetryError`."""
+
+    retry_error_callback: Callable[[RetryCallState], Any | Awaitable[Any]] | None
+    """An optional callable that is called when the retry attempts are exhausted and `reraise` is False.
+
+    Tenacity's default for this argument is `None`."""


 class TenacityTransport(BaseTransport):
@@ -47,8 +125,8 @@ class TenacityTransport(BaseTransport):

     Args:
         wrapped: The underlying transport to wrap and add retry functionality to.
-
-
+        config: The arguments to use for the tenacity `retry` decorator, including retry conditions,
+            wait strategy, stop conditions, etc. See the tenacity docs for more info.
         validate_response: Optional callable that takes a Response and can raise an exception
             to be handled by the controller if the response should trigger a retry.
             Common use case is to raise exceptions for certain HTTP status codes.
@@ -57,17 +135,17 @@ class TenacityTransport(BaseTransport):
     Example:
         ```python
         from httpx import Client, HTTPTransport, HTTPStatusError
-        from tenacity import
-        from pydantic_ai.retries import TenacityTransport, wait_retry_after
+        from tenacity import stop_after_attempt, retry_if_exception_type
+        from pydantic_ai.retries import RetryConfig, TenacityTransport, wait_retry_after

         transport = TenacityTransport(
-
-        Retrying(
+            RetryConfig(
                 retry=retry_if_exception_type(HTTPStatusError),
                 wait=wait_retry_after(max_wait=300),
                 stop=stop_after_attempt(5),
                 reraise=True
             ),
+            HTTPTransport(),
             validate_response=lambda r: r.raise_for_status()
         )
         client = Client(transport=transport)
@@ -76,11 +154,22 @@ class TenacityTransport(BaseTransport):

     def __init__(
         self,
-
+        config: RetryConfig,
         wrapped: BaseTransport | None = None,
-        validate_response: Callable[[Response],
+        validate_response: Callable[[Response], Any] | None = None,
+        **kwargs: NoReturn,
     ):
-
+        # TODO: Remove the following checks (and **kwargs) during v1 release
+        if 'controller' in kwargs:  # pragma: no cover
+            raise TypeError('The `controller` argument has been renamed to `config`, and now requires a `RetryConfig`.')
+        if kwargs:  # pragma: no cover
+            raise TypeError(f'Unexpected keyword arguments: {", ".join(kwargs)}')
+        if isinstance(config, Retrying):  # pragma: no cover
+            raise ValueError(
+                'Passing a Retrying instance is no longer supported; the `config` argument must be a `pydantic_ai.retries.RetryConfig`.'
+            )
+
+        self.config = config
         self.wrapped = wrapped or HTTPTransport()
         self.validate_response = validate_response

@@ -97,13 +186,19 @@ class TenacityTransport(BaseTransport):
             RuntimeError: If the retry controller did not make any attempts.
             Exception: Any exception raised by the wrapped transport or validation function.
         """
-
-
-
-
-
-
-
+
+        @retry(**self.config)
+        def handle_request(req: Request) -> Response:
+            response = self.wrapped.handle_request(req)
+
+            # this is normally set by httpx _after_ calling this function, but we want the request in the validator:
+            response.request = req
+
+            if self.validate_response:
+                self.validate_response(response)
+            return response
+
+        return handle_request(request)


 class AsyncTenacityTransport(AsyncBaseTransport):
@@ -119,8 +214,8 @@ class AsyncTenacityTransport(AsyncBaseTransport):

     Args:
         wrapped: The underlying async transport to wrap and add retry functionality to.
-
-
+        config: The arguments to use for the tenacity `retry` decorator, including retry conditions,
+            wait strategy, stop conditions, etc. See the tenacity docs for more info.
         validate_response: Optional callable that takes a Response and can raise an exception
             to be handled by the controller if the response should trigger a retry.
             Common use case is to raise exceptions for certain HTTP status codes.
@@ -129,11 +224,11 @@ class AsyncTenacityTransport(AsyncBaseTransport):
     Example:
         ```python
         from httpx import AsyncClient, HTTPStatusError
-        from tenacity import
-        from pydantic_ai.retries import AsyncTenacityTransport, wait_retry_after
+        from tenacity import stop_after_attempt, retry_if_exception_type
+        from pydantic_ai.retries import AsyncTenacityTransport, RetryConfig, wait_retry_after

         transport = AsyncTenacityTransport(
-
+            RetryConfig(
                 retry=retry_if_exception_type(HTTPStatusError),
                 wait=wait_retry_after(max_wait=300),
                 stop=stop_after_attempt(5),
@@ -147,11 +242,22 @@ class AsyncTenacityTransport(AsyncBaseTransport):

     def __init__(
         self,
-
+        config: RetryConfig,
         wrapped: AsyncBaseTransport | None = None,
-        validate_response: Callable[[Response],
+        validate_response: Callable[[Response], Any] | None = None,
+        **kwargs: NoReturn,
     ):
-
+        # TODO: Remove the following checks (and **kwargs) during v1 release
+        if 'controller' in kwargs:  # pragma: no cover
+            raise TypeError('The `controller` argument has been renamed to `config`, and now requires a `RetryConfig`.')
+        if kwargs:  # pragma: no cover
+            raise TypeError(f'Unexpected keyword arguments: {", ".join(kwargs)}')
+        if isinstance(config, AsyncRetrying):  # pragma: no cover
+            raise ValueError(
+                'Passing an AsyncRetrying instance is no longer supported; the `config` argument must be a `pydantic_ai.retries.RetryConfig`.'
+            )
+
+        self.config = config
         self.wrapped = wrapped or AsyncHTTPTransport()
         self.validate_response = validate_response

@@ -168,13 +274,19 @@ class AsyncTenacityTransport(AsyncBaseTransport):
             RuntimeError: If the retry controller did not make any attempts.
             Exception: Any exception raised by the wrapped transport or validation function.
         """
-
-
-
-
-
-
-
+
+        @retry(**self.config)
+        async def handle_async_request(req: Request) -> Response:
+            response = await self.wrapped.handle_async_request(req)
+
+            # this is normally set by httpx _after_ calling this function, but we want the request in the validator:
+            response.request = req
+
+            if self.validate_response:
+                self.validate_response(response)
+            return response
+
+        return await handle_async_request(request)


 def wait_retry_after(
@@ -202,11 +314,11 @@ def wait_retry_after(
     Example:
         ```python
         from httpx import AsyncClient, HTTPStatusError
-        from tenacity import
-        from pydantic_ai.retries import AsyncTenacityTransport, wait_retry_after
+        from tenacity import stop_after_attempt, retry_if_exception_type
+        from pydantic_ai.retries import AsyncTenacityTransport, RetryConfig, wait_retry_after

         transport = AsyncTenacityTransport(
-
+            RetryConfig(
                 retry=retry_if_exception_type(HTTPStatusError),
                 wait=wait_retry_after(max_wait=120),
                 stop=stop_after_attempt(5),
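
Taken together, the retries.py hunks replace a controller-first API with a config-first one: where 0.7.5 took a pre-built tenacity `Retrying`/`AsyncRetrying` controller, 0.8.0 takes a `RetryConfig` TypedDict whose fields are forwarded to tenacity via `@retry(**config)`, and passing a controller now raises. A migration sketch based on the docstring examples above:

import httpx
from tenacity import retry_if_exception_type, stop_after_attempt

from pydantic_ai.retries import AsyncTenacityTransport, RetryConfig, wait_retry_after

# 0.7.5 accepted a controller such as AsyncRetrying(retry=..., wait=..., stop=...);
# in 0.8.0 that raises ValueError, and a RetryConfig must be passed instead.
transport = AsyncTenacityTransport(
    RetryConfig(
        retry=retry_if_exception_type(httpx.HTTPStatusError),
        wait=wait_retry_after(max_wait=300),
        stop=stop_after_attempt(5),
        reraise=True,
    ),
    validate_response=lambda response: response.raise_for_status(),
)
client = httpx.AsyncClient(transport=transport)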
{pydantic_ai_slim-0.7.5.dist-info → pydantic_ai_slim-0.8.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.7.5
+Version: 0.8.0
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -35,7 +35,7 @@ Requires-Dist: genai-prices>=0.0.22
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.7.5
+Requires-Dist: pydantic-graph==0.8.0
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -57,7 +57,7 @@ Requires-Dist: cohere>=5.16.0; (platform_system != 'Emscripten') and extra == 'c
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.7.5; extra == 'evals'
+Requires-Dist: pydantic-evals==0.8.0; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq
@@ -67,7 +67,7 @@ Requires-Dist: huggingface-hub[inference]>=0.33.5; extra == 'huggingface'
 Provides-Extra: logfire
 Requires-Dist: logfire>=3.14.1; extra == 'logfire'
 Provides-Extra: mcp
-Requires-Dist: mcp>=1.
+Requires-Dist: mcp>=1.12.3; (python_version >= '3.10') and extra == 'mcp'
 Provides-Extra: mistral
 Requires-Dist: mistralai>=1.9.2; extra == 'mistral'
 Provides-Extra: openai