mirascope 2.1.0__py3-none-any.whl → 2.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. mirascope/api/_generated/functions/client.py +10 -0
  2. mirascope/api/_generated/functions/raw_client.py +8 -0
  3. mirascope/api/_generated/functions/types/functions_create_response.py +25 -8
  4. mirascope/api/_generated/functions/types/functions_find_by_hash_response.py +25 -10
  5. mirascope/api/_generated/functions/types/functions_get_by_env_response.py +1 -0
  6. mirascope/api/_generated/functions/types/functions_get_response.py +25 -8
  7. mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +1 -0
  8. mirascope/api/_generated/functions/types/functions_list_response_functions_item.py +22 -7
  9. mirascope/api/_generated/reference.md +9 -0
  10. mirascope/llm/__init__.py +42 -0
  11. mirascope/llm/calls/calls.py +38 -11
  12. mirascope/llm/exceptions.py +69 -0
  13. mirascope/llm/prompts/prompts.py +47 -9
  14. mirascope/llm/providers/__init__.py +3 -0
  15. mirascope/llm/providers/openai/completions/_utils/__init__.py +3 -0
  16. mirascope/llm/providers/openai/completions/_utils/encode.py +27 -32
  17. mirascope/llm/providers/openai/completions/_utils/feature_info.py +50 -0
  18. mirascope/llm/providers/openai/completions/base_provider.py +21 -0
  19. mirascope/llm/providers/openai/completions/provider.py +8 -2
  20. mirascope/llm/providers/openrouter/__init__.py +5 -0
  21. mirascope/llm/providers/openrouter/provider.py +67 -0
  22. mirascope/llm/providers/provider_id.py +2 -0
  23. mirascope/llm/providers/provider_registry.py +6 -0
  24. mirascope/llm/responses/response.py +217 -0
  25. mirascope/llm/responses/stream_response.py +234 -0
  26. mirascope/llm/retries/__init__.py +51 -0
  27. mirascope/llm/retries/retry_calls.py +159 -0
  28. mirascope/llm/retries/retry_config.py +168 -0
  29. mirascope/llm/retries/retry_decorator.py +258 -0
  30. mirascope/llm/retries/retry_models.py +1313 -0
  31. mirascope/llm/retries/retry_prompts.py +227 -0
  32. mirascope/llm/retries/retry_responses.py +340 -0
  33. mirascope/llm/retries/retry_stream_responses.py +571 -0
  34. mirascope/llm/retries/utils.py +159 -0
  35. mirascope/ops/_internal/versioned_calls.py +249 -9
  36. mirascope/ops/_internal/versioned_functions.py +2 -0
  37. {mirascope-2.1.0.dist-info → mirascope-2.2.0.dist-info}/METADATA +1 -1
  38. {mirascope-2.1.0.dist-info → mirascope-2.2.0.dist-info}/RECORD +40 -28
  39. {mirascope-2.1.0.dist-info → mirascope-2.2.0.dist-info}/WHEEL +0 -0
  40. {mirascope-2.1.0.dist-info → mirascope-2.2.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,159 @@
1
+ """RetryCall extends Call to return RetryResponse instead of Response."""
2
+
3
+ from typing import Generic, TypeVar, overload
4
+
5
+ from ..calls.calls import BaseCall
6
+ from ..formatting import FormattableT
7
+ from ..types import P
8
+ from .retry_config import RetryConfig
9
+ from .retry_models import RetryModel
10
+ from .retry_prompts import (
11
+ AsyncRetryPrompt,
12
+ BaseRetryPrompt,
13
+ RetryPrompt,
14
+ )
15
+ from .retry_responses import (
16
+ AsyncRetryResponse,
17
+ RetryResponse,
18
+ )
19
+ from .retry_stream_responses import (
20
+ AsyncRetryStreamResponse,
21
+ RetryStreamResponse,
22
+ )
23
+
24
RetryPromptT = TypeVar("RetryPromptT", bound=BaseRetryPrompt)


class BaseRetryCall(BaseCall[RetryPromptT], Generic[RetryPromptT]):
    """Shared base for retry-enabled calls.

    Exposes the prompt's retry configuration and ensures the effective model
    is always wrapped as a `RetryModel`.
    """

    @property
    def retry_config(self) -> RetryConfig:
        """The retry configuration carried by the underlying prompt."""
        return self.prompt.retry_config

    @property
    def model(self) -> RetryModel:
        """The model used for generating responses. May be overwritten via `with llm.model(...)`."""
        base = super().model
        # Avoid double-wrapping when the active model is already retry-aware.
        return base if isinstance(base, RetryModel) else RetryModel(base, self.retry_config)
42
+
43
+
44
class RetryCall(BaseRetryCall[RetryPrompt[P, FormattableT]], Generic[P, FormattableT]):
    """A retry-enabled call over a synchronous `RetryPrompt`.

    Results come back as `RetryResponse` (or `RetryStreamResponse` when
    streaming) rather than the plain response types. The retry configuration
    lives on the prompt itself.

    The model can be overridden at runtime using `with llm.model(...)` context manager.
    """

    @overload
    def __call__(self: "RetryCall[P, None]", *args: P.args, **kwargs: P.kwargs) -> RetryResponse[None]: ...

    @overload
    def __call__(self: "RetryCall[P, FormattableT]", *args: P.args, **kwargs: P.kwargs) -> RetryResponse[FormattableT]: ...

    def __call__(self, *args: P.args, **kwargs: P.kwargs) -> RetryResponse[None] | RetryResponse[FormattableT]:
        """Generates a retry response using the LLM."""
        # Calling the instance is sugar for `.call(...)`.
        return self.call(*args, **kwargs)

    @overload
    def call(self: "RetryCall[P, None]", *args: P.args, **kwargs: P.kwargs) -> RetryResponse[None]: ...

    @overload
    def call(self: "RetryCall[P, FormattableT]", *args: P.args, **kwargs: P.kwargs) -> RetryResponse[FormattableT]: ...

    def call(self, *args: P.args, **kwargs: P.kwargs) -> RetryResponse[None] | RetryResponse[FormattableT]:
        """Generates a retry response using the LLM."""
        return self.prompt.call(self.model, *args, **kwargs)

    @overload
    def stream(self: "RetryCall[P, None]", *args: P.args, **kwargs: P.kwargs) -> RetryStreamResponse[None]: ...

    @overload
    def stream(self: "RetryCall[P, FormattableT]", *args: P.args, **kwargs: P.kwargs) -> RetryStreamResponse[FormattableT]: ...

    def stream(self, *args: P.args, **kwargs: P.kwargs) -> RetryStreamResponse[None] | RetryStreamResponse[FormattableT]:
        """Generates a retry stream response using the LLM."""
        return self.prompt.stream(self.model, *args, **kwargs)
100
+
101
+
102
class AsyncRetryCall(
    BaseRetryCall[AsyncRetryPrompt[P, FormattableT]], Generic[P, FormattableT]
):
    """A retry-enabled call over an `AsyncRetryPrompt`.

    Results come back as `AsyncRetryResponse` (or `AsyncRetryStreamResponse`
    when streaming) rather than the plain async response types. The retry
    configuration lives on the prompt itself.

    The model can be overridden at runtime using `with llm.model(...)` context manager.
    """

    @overload
    async def __call__(self: "AsyncRetryCall[P, None]", *args: P.args, **kwargs: P.kwargs) -> AsyncRetryResponse[None]: ...

    @overload
    async def __call__(self: "AsyncRetryCall[P, FormattableT]", *args: P.args, **kwargs: P.kwargs) -> AsyncRetryResponse[FormattableT]: ...

    async def __call__(self, *args: P.args, **kwargs: P.kwargs) -> AsyncRetryResponse[None] | AsyncRetryResponse[FormattableT]:
        """Generates a retry response using the LLM asynchronously."""
        # Awaiting the instance is sugar for awaiting `.call(...)`.
        return await self.call(*args, **kwargs)

    @overload
    async def call(self: "AsyncRetryCall[P, None]", *args: P.args, **kwargs: P.kwargs) -> AsyncRetryResponse[None]: ...

    @overload
    async def call(self: "AsyncRetryCall[P, FormattableT]", *args: P.args, **kwargs: P.kwargs) -> AsyncRetryResponse[FormattableT]: ...

    async def call(self, *args: P.args, **kwargs: P.kwargs) -> AsyncRetryResponse[None] | AsyncRetryResponse[FormattableT]:
        """Generates a retry response using the LLM asynchronously."""
        return await self.prompt.call(self.model, *args, **kwargs)

    @overload
    async def stream(self: "AsyncRetryCall[P, None]", *args: P.args, **kwargs: P.kwargs) -> AsyncRetryStreamResponse[None]: ...

    @overload
    async def stream(self: "AsyncRetryCall[P, FormattableT]", *args: P.args, **kwargs: P.kwargs) -> AsyncRetryStreamResponse[FormattableT]: ...

    async def stream(self, *args: P.args, **kwargs: P.kwargs) -> AsyncRetryStreamResponse[None] | AsyncRetryStreamResponse[FormattableT]:
        """Generates a retry stream response using the LLM asynchronously."""
        return await self.prompt.stream(self.model, *args, **kwargs)
@@ -0,0 +1,168 @@
1
+ """Configuration for retry behavior."""
2
+
3
+ from collections.abc import Sequence
4
+ from dataclasses import dataclass
5
+ from typing import TYPE_CHECKING, TypedDict
6
+ from typing_extensions import Unpack
7
+
8
+ from ..exceptions import (
9
+ ConnectionError,
10
+ RateLimitError,
11
+ ServerError,
12
+ TimeoutError,
13
+ )
14
+
15
+ if TYPE_CHECKING:
16
+ from ..models import Model
17
+ from ..providers import ModelId
18
+
19
# NOTE: ConnectionError and TimeoutError here are the package's own exception
# types (imported above from ..exceptions); they shadow the builtins of the
# same name within this module.
DEFAULT_RETRYABLE_ERRORS: tuple[type[Exception], ...] = (
    ConnectionError,
    RateLimitError,
    ServerError,
    TimeoutError,
)
"""Default exceptions that trigger a retry.

These are transient errors that are likely to succeed on retry:
- ConnectionError: Network issues, DNS failures
- RateLimitError: Rate limits exceeded (429)
- ServerError: Provider-side errors (500+)
- TimeoutError: Request timeouts
"""

DEFAULT_MAX_RETRIES: int = 3
"""Default maximum number of retries after the initial attempt fails."""

DEFAULT_INITIAL_DELAY: float = 0.5
"""Default initial delay in seconds before the first retry."""

DEFAULT_MAX_DELAY: float = 60.0
"""Default maximum delay in seconds between retries."""

DEFAULT_BACKOFF_MULTIPLIER: float = 2.0
"""Default multiplier for exponential backoff (delay *= multiplier after each retry)."""

DEFAULT_JITTER: float = 0.0
"""Default jitter factor (0.0 to 1.0) to add randomness to delays.

A jitter of 0.1 means +/- 10% random variation on the calculated delay.
"""
+ """
51
+
52
+
53
class RetryArgs(TypedDict, total=False):
    """Arguments for configuring retry behavior.

    This TypedDict is used for the user-facing API where all fields are optional.
    Use RetryConfig for the internal representation with defaults applied.

    Attributes:
        max_retries: Maximum number of retries after the initial attempt fails.
            Defaults to 3.
        retry_on: Tuple of exception types that should trigger a retry.
            Defaults to DEFAULT_RETRYABLE_ERRORS (ConnectionError, RateLimitError,
            ServerError, TimeoutError).
        initial_delay: Initial delay in seconds before the first retry. Defaults to 0.5.
        max_delay: Maximum delay in seconds between retries. Defaults to 60.0.
        backoff_multiplier: Multiplier for exponential backoff. Defaults to 2.0.
        jitter: Jitter factor (0.0 to 1.0) to add randomness to delays. Defaults to 0.0.
        fallback_models: Fallback models to try if the primary model fails.
            Defaults to no fallbacks.
    """

    max_retries: int
    """Maximum number of retries after the initial attempt fails. Defaults to 3."""

    retry_on: tuple[type[Exception], ...]
    """Tuple of exception types that should trigger a retry.

    Defaults to DEFAULT_RETRYABLE_ERRORS (ConnectionError, RateLimitError,
    ServerError, TimeoutError).
    """

    initial_delay: float
    """Initial delay in seconds before the first retry. Defaults to 0.5."""

    max_delay: float
    """Maximum delay in seconds between retries. Defaults to 60.0."""

    backoff_multiplier: float
    """Multiplier for exponential backoff (delay *= multiplier after each retry). Defaults to 2.0."""

    jitter: float
    """Jitter factor (0.0 to 1.0) to add randomness to delays. Defaults to 0.0."""

    fallback_models: "Sequence[Model | ModelId]"
    """Sequence of fallback models to try if the primary model fails.

    Each model gets its own full retry budget (max_retries applies per model).
    ModelId strings inherit params from the primary model; Model instances
    use their own params. Defaults to empty tuple (no fallbacks).
    """
+ """
100
+
101
+
102
@dataclass(frozen=True)
class RetryConfig:
    """Immutable retry configuration with every field defaulted.

    Attributes:
        max_retries: Maximum number of retries after the initial attempt fails.
        retry_on: Tuple of exception types that should trigger a retry.
        fallback_models: Fallback models to try if the primary model fails.
        initial_delay: Initial delay in seconds before the first retry.
        max_delay: Maximum delay in seconds between retries.
        backoff_multiplier: Multiplier for exponential backoff.
        jitter: Jitter factor (0.0 to 1.0) to add randomness to delays.
    """

    max_retries: int = DEFAULT_MAX_RETRIES
    """Maximum number of retries after the initial attempt fails."""

    retry_on: tuple[type[Exception], ...] = DEFAULT_RETRYABLE_ERRORS
    """Tuple of exception types that should trigger a retry."""

    fallback_models: "tuple[Model | ModelId, ...]" = ()
    """Fallback models tried, in order, when the primary model fails.

    Each model gets its own full retry budget (max_retries applies per model).
    ModelId strings inherit params from the primary model; Model instances
    use their own params. Defaults to empty tuple (no fallbacks).
    """

    initial_delay: float = DEFAULT_INITIAL_DELAY
    """Initial delay in seconds before the first retry."""

    max_delay: float = DEFAULT_MAX_DELAY
    """Maximum delay in seconds between retries."""

    backoff_multiplier: float = DEFAULT_BACKOFF_MULTIPLIER
    """Multiplier for exponential backoff (delay *= multiplier after each retry)."""

    jitter: float = DEFAULT_JITTER
    """Jitter factor (0.0 to 1.0) to add randomness to delays."""

    def __post_init__(self) -> None:
        """Validate configuration values."""
        # The three delay/count fields share the same non-negativity rule.
        non_negative = (
            ("max_retries", self.max_retries),
            ("initial_delay", self.initial_delay),
            ("max_delay", self.max_delay),
        )
        for field_name, field_value in non_negative:
            if field_value < 0:
                raise ValueError(f"{field_name} must be non-negative")
        if self.backoff_multiplier < 1:
            raise ValueError("backoff_multiplier must be >= 1")
        if not 0 <= self.jitter <= 1:
            raise ValueError("jitter must be between 0.0 and 1.0")

    @classmethod
    def from_args(cls, **args: Unpack[RetryArgs]) -> "RetryConfig":
        """Create a RetryConfig from RetryArgs kwargs, applying defaults."""
        return cls(
            max_retries=args.get("max_retries", DEFAULT_MAX_RETRIES),
            retry_on=args.get("retry_on", DEFAULT_RETRYABLE_ERRORS),
            # Normalize any Sequence into the tuple form the dataclass stores.
            fallback_models=tuple(args.get("fallback_models", ())),
            initial_delay=args.get("initial_delay", DEFAULT_INITIAL_DELAY),
            max_delay=args.get("max_delay", DEFAULT_MAX_DELAY),
            backoff_multiplier=args.get("backoff_multiplier", DEFAULT_BACKOFF_MULTIPLIER),
            jitter=args.get("jitter", DEFAULT_JITTER),
        )
@@ -0,0 +1,258 @@
1
+ """The @llm.retry decorator for adding retry logic to prompts and calls."""
2
+
3
+ from typing import overload
4
+ from typing_extensions import Unpack
5
+
6
+ from ..calls import AsyncCall, AsyncContextCall, Call, ContextCall
7
+ from ..context import DepsT
8
+ from ..formatting import FormattableT
9
+ from ..prompts import AsyncContextPrompt, AsyncPrompt, ContextPrompt, Prompt
10
+ from ..types import P
11
+ from .retry_calls import (
12
+ AsyncRetryCall,
13
+ RetryCall,
14
+ )
15
+ from .retry_config import RetryArgs, RetryConfig
16
+ from .retry_prompts import (
17
+ AsyncRetryPrompt,
18
+ RetryPrompt,
19
+ )
20
+
21
RetryTarget = (
    ContextPrompt[P, DepsT, FormattableT]
    | AsyncContextPrompt[P, DepsT, FormattableT]
    | Prompt[P, FormattableT]
    | RetryPrompt[P, FormattableT]
    | AsyncPrompt[P, FormattableT]
    | AsyncRetryPrompt[P, FormattableT]
    | ContextCall[P, DepsT, FormattableT]
    | AsyncContextCall[P, DepsT, FormattableT]
    | Call[P, FormattableT]
    | RetryCall[P, FormattableT]
    | AsyncCall[P, FormattableT]
    | AsyncRetryCall[P, FormattableT]
)
"""Union type for all targets that can be wrapped with retry logic.

Includes Prompts and Calls (both sync and async variants), as well as their
already-retry-wrapped counterparts (rewrapping replaces the retry config).
For models, use `llm.retry_model()` instead.
"""

RetryResult = (
    RetryPrompt[P, FormattableT]
    | AsyncRetryPrompt[P, FormattableT]
    | RetryCall[P, FormattableT]
    | AsyncRetryCall[P, FormattableT]
)
"""Union type for all retry-wrapped results.

When a target is wrapped with retry logic, it returns one of these types.
"""
51
+
52
+
53
# Overloads for direct wrapping: retry(target, ...)
#
# NOTE(review): these overload stubs are separated from the `retry`
# implementation at the bottom of the module by the RetryDecorator class and
# _wrap_target — confirm type checkers accept this layout.


@overload
def retry(
    target: Prompt[P, FormattableT] | RetryPrompt[P, FormattableT],
    **config: Unpack[RetryArgs],
) -> RetryPrompt[P, FormattableT]:
    """Wrap a Prompt with retry logic."""
    ...


@overload
def retry(
    target: AsyncPrompt[P, FormattableT] | AsyncRetryPrompt[P, FormattableT],
    **config: Unpack[RetryArgs],
) -> AsyncRetryPrompt[P, FormattableT]:
    """Wrap an AsyncPrompt with retry logic."""
    ...


@overload
def retry(
    target: Call[P, FormattableT] | RetryCall[P, FormattableT],
    **config: Unpack[RetryArgs],
) -> RetryCall[P, FormattableT]:
    """Wrap a Call with retry logic."""
    ...


@overload
def retry(
    target: AsyncCall[P, FormattableT] | AsyncRetryCall[P, FormattableT],
    **config: Unpack[RetryArgs],
) -> AsyncRetryCall[P, FormattableT]:
    """Wrap an AsyncCall with retry logic."""
    ...


# Overloads for decorator usage: @retry(...)


@overload
def retry(
    target: None = None,
    **config: Unpack[RetryArgs],
) -> "RetryDecorator":
    """Return a decorator that adds retry logic."""
    ...
102
+
103
+
104
class RetryDecorator:
    """Parameterized decorator produced by calling `retry()` with no target.

    Stores a resolved `RetryConfig` and applies it to whatever prompt or
    call it is later invoked on.
    """

    def __init__(self, config: RetryConfig) -> None:
        self._retry_config = config

    @overload
    def __call__(self, target: Prompt[P, FormattableT] | RetryPrompt[P, FormattableT]) -> RetryPrompt[P, FormattableT]: ...

    @overload
    def __call__(self, target: AsyncPrompt[P, FormattableT] | AsyncRetryPrompt[P, FormattableT]) -> AsyncRetryPrompt[P, FormattableT]: ...

    @overload
    def __call__(self, target: Call[P, FormattableT] | RetryCall[P, FormattableT]) -> RetryCall[P, FormattableT]: ...

    @overload
    def __call__(self, target: AsyncCall[P, FormattableT] | AsyncRetryCall[P, FormattableT]) -> AsyncRetryCall[P, FormattableT]: ...

    def __call__(self, target: RetryTarget[P, DepsT, FormattableT]) -> RetryResult[P, FormattableT]:
        """Apply retry logic to the target."""
        return _wrap_target(target, self._retry_config)
140
+
141
+
142
def _wrap_target(
    target: RetryTarget[P, DepsT, FormattableT],
    retry_config: RetryConfig,
) -> RetryResult[P, FormattableT]:
    """Internal function to wrap a target with retry logic.

    Rebuilds the target around a retry prompt that carries `retry_config`,
    so rewrapping an already retry-enabled target replaces its config.
    """
    # AsyncRetryCall/RetryCall inherit from BaseCall rather than
    # AsyncCall/Call, so each tuple check must name both variants. The plain
    # and retry variants are rebuilt identically, which lets the four
    # original branches collapse into two.
    if isinstance(target, (AsyncCall, AsyncRetryCall)):
        source = target.prompt
        return AsyncRetryCall(
            default_model=target.default_model,
            prompt=AsyncRetryPrompt(
                fn=source.fn,
                toolkit=source.toolkit,
                format=source.format,
                retry_config=retry_config,
            ),
        )
    if isinstance(target, (Call, RetryCall)):
        source = target.prompt
        return RetryCall(
            default_model=target.default_model,
            prompt=RetryPrompt(
                fn=source.fn,
                toolkit=source.toolkit,
                format=source.format,
                retry_config=retry_config,
            ),
        )

    # Prompts: the async check must precede the sync one; retry prompt
    # variants are expected to match these same checks.
    # NOTE(review): the Context* members of RetryTarget have no dedicated
    # branch here — confirm they are caught by the Call/Prompt checks above
    # or are intentionally rejected below.
    if isinstance(target, AsyncPrompt):
        return AsyncRetryPrompt(
            fn=target.fn,
            toolkit=target.toolkit,
            format=target.format,
            retry_config=retry_config,
        )
    if isinstance(target, Prompt):
        return RetryPrompt(
            fn=target.fn,
            toolkit=target.toolkit,
            format=target.format,
            retry_config=retry_config,
        )

    raise ValueError(f"Unsupported target type for retry: {type(target)}")
212
+
213
+
214
def retry(
    target: RetryTarget[P, DepsT, FormattableT] | None = None,
    **config: Unpack[RetryArgs],
) -> RetryResult[P, FormattableT] | RetryDecorator:
    """Add retry logic to a Prompt or Call.

    This function can be used in two ways:

    1. Direct wrapping:
        ```python
        retry_call = llm.retry(my_call, max_retries=2)
        ```

    2. As a decorator:
        ```python
        @llm.retry(max_retries=2)
        @llm.call("openai/gpt-4")
        def my_call() -> str:
            return "Hello"
        ```

    The wrapped target automatically retries on failures, handles rate
    limits, and can fall back to other models; retry configuration is given
    as keyword arguments. Passing an existing RetryPrompt or RetryCall
    produces a new one using the retry_config passed here.

    For models, use `llm.retry_model()` instead.

    Args:
        target: The Prompt or Call to wrap with retry logic. If None,
            returns a decorator that can be applied to a target.
        **config: Configuration for retry behavior (see RetryArgs).

    Returns:
        A retry-wrapped version of the target that returns Retry* response types,
        or a RetryDecorator if no target is provided.
    """
    retry_config = RetryConfig.from_args(**config)
    # Bare `@llm.retry(...)` usage defers wrapping to the returned decorator.
    return RetryDecorator(retry_config) if target is None else _wrap_target(target, retry_config)