mirascope 2.1.0__py3-none-any.whl → 2.2.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as published to their public registry. It is provided for informational purposes only.
- mirascope/api/_generated/functions/client.py +10 -0
- mirascope/api/_generated/functions/raw_client.py +8 -0
- mirascope/api/_generated/functions/types/functions_create_response.py +25 -8
- mirascope/api/_generated/functions/types/functions_find_by_hash_response.py +25 -10
- mirascope/api/_generated/functions/types/functions_get_by_env_response.py +1 -0
- mirascope/api/_generated/functions/types/functions_get_response.py +25 -8
- mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +1 -0
- mirascope/api/_generated/functions/types/functions_list_response_functions_item.py +22 -7
- mirascope/api/_generated/reference.md +9 -0
- mirascope/llm/__init__.py +42 -0
- mirascope/llm/calls/calls.py +38 -11
- mirascope/llm/exceptions.py +69 -0
- mirascope/llm/prompts/prompts.py +47 -9
- mirascope/llm/providers/__init__.py +3 -0
- mirascope/llm/providers/openai/completions/_utils/__init__.py +3 -0
- mirascope/llm/providers/openai/completions/_utils/encode.py +27 -32
- mirascope/llm/providers/openai/completions/_utils/feature_info.py +50 -0
- mirascope/llm/providers/openai/completions/base_provider.py +21 -0
- mirascope/llm/providers/openai/completions/provider.py +8 -2
- mirascope/llm/providers/openrouter/__init__.py +5 -0
- mirascope/llm/providers/openrouter/provider.py +67 -0
- mirascope/llm/providers/provider_id.py +2 -0
- mirascope/llm/providers/provider_registry.py +6 -0
- mirascope/llm/responses/response.py +217 -0
- mirascope/llm/responses/stream_response.py +234 -0
- mirascope/llm/retries/__init__.py +51 -0
- mirascope/llm/retries/retry_calls.py +159 -0
- mirascope/llm/retries/retry_config.py +168 -0
- mirascope/llm/retries/retry_decorator.py +258 -0
- mirascope/llm/retries/retry_models.py +1313 -0
- mirascope/llm/retries/retry_prompts.py +227 -0
- mirascope/llm/retries/retry_responses.py +340 -0
- mirascope/llm/retries/retry_stream_responses.py +571 -0
- mirascope/llm/retries/utils.py +159 -0
- mirascope/ops/_internal/versioned_calls.py +249 -9
- mirascope/ops/_internal/versioned_functions.py +2 -0
- {mirascope-2.1.0.dist-info → mirascope-2.2.0.dist-info}/METADATA +1 -1
- {mirascope-2.1.0.dist-info → mirascope-2.2.0.dist-info}/RECORD +40 -28
- {mirascope-2.1.0.dist-info → mirascope-2.2.0.dist-info}/WHEEL +0 -0
- {mirascope-2.1.0.dist-info → mirascope-2.2.0.dist-info}/licenses/LICENSE +0 -0
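The headline change in 2.2.0 is the new `mirascope.llm.retries` package, alongside an OpenRouter provider and expanded generated `functions` response types. The two largest new retry files, `retry_prompts.py` and `retry_responses.py`, are reproduced in full below. As orientation, here is a minimal, hedged sketch of how the pieces might fit together; `RetryConfig()`'s constructor arguments, the model-id string format, and whether `tools`/`format` accept `None` are all assumptions, since `retry_config.py` and `retry_models.py` are not reproduced in this diff:

```python
# Hedged sketch of the new retry surface, inferred from retry_prompts.py
# and retry_responses.py below; not a verbatim example from the package.
from mirascope.llm.models import Model
from mirascope.llm.retries.retry_config import RetryConfig
from mirascope.llm.retries.retry_models import RetryModel

config = RetryConfig()  # assumption: real fields live in retry_config.py (not shown)
model = Model("openai/gpt-4o-mini")  # assumption: model-id string format

# RetryModel wraps a Model with retry behavior, exactly as
# _ensure_retry_model does in retry_prompts.py below.
retry_model = RetryModel(model=model, retry_config=config)

# Per the diff, RetryModel.call takes content/tools/format keywords and
# returns a RetryResponse carrying retry metadata.
response = retry_model.call(
    content="Recommend a fantasy book.",
    tools=None,   # assumption: None accepted when no toolkit is used
    format=None,  # assumption: None accepted when no format is used
)
print(response.retry_failures)  # [] when the first attempt succeeds
```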
```diff
--- /dev/null
+++ b/mirascope/llm/retries/retry_prompts.py
@@ -0,0 +1,227 @@
+"""RetryPrompt extends Prompt to return RetryResponse instead of Response."""
+
+from collections.abc import Callable
+from typing import Any, Generic, overload
+
+from ..formatting import FormatSpec, FormattableT
+from ..models import Model
+from ..prompts import (
+    AsyncPrompt,
+    Prompt,
+)
+from ..prompts.prompts import BasePrompt
+from ..prompts.protocols import AsyncMessageTemplate, MessageTemplate
+from ..providers import ModelId
+from ..tools import AsyncToolkit, Toolkit
+from ..types import P
+from .retry_config import RetryConfig
+from .retry_models import RetryModel
+from .retry_responses import (
+    AsyncRetryResponse,
+    RetryResponse,
+)
+from .retry_stream_responses import (
+    AsyncRetryStreamResponse,
+    RetryStreamResponse,
+)
+
+
+def _ensure_retry_model(model: Model | ModelId, config: RetryConfig) -> RetryModel:
+    """Ensure a model has retry capabilities, adding them if necessary."""
+    if isinstance(model, RetryModel):
+        return model
+    if isinstance(model, str):
+        model = Model(model)
+    return RetryModel(model=model, retry_config=config)
+
+
+class BaseRetryPrompt(BasePrompt[Callable[..., Any]]):
+    """Base class for retry-enabled prompts that adds retry_config."""
+
+    retry_config: RetryConfig
+    """Configuration for retry behavior."""
+
+
+class RetryPrompt(BaseRetryPrompt, Prompt[P, FormattableT], Generic[P, FormattableT]):
+    """A retry-enabled prompt that extends Prompt and returns RetryResponse.
+
+    This extends Prompt and overrides call methods to return RetryResponse
+    instead of Response. It handles wrapping the provided Model in a RetryModel
+    if necessary.
+    """
+
+    def __init__(
+        self,
+        *,
+        fn: MessageTemplate[P],
+        toolkit: Toolkit,
+        format: FormatSpec[FormattableT] | None,
+        retry_config: RetryConfig,
+    ) -> None:
+        super().__init__(fn=fn, toolkit=toolkit, format=format)
+        self.retry_config = retry_config
+
+    @overload
+    def __call__(
+        self: "RetryPrompt[P, None]",
+        model: Model | ModelId,
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> RetryResponse[None]: ...
+
+    @overload
+    def __call__(
+        self: "RetryPrompt[P, FormattableT]",
+        model: Model | ModelId,
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> RetryResponse[FormattableT]: ...
+
+    def __call__(
+        self, model: Model | ModelId, *args: P.args, **kwargs: P.kwargs
+    ) -> RetryResponse[None] | RetryResponse[FormattableT]:
+        """Generates a retry response using the provided model."""
+        return self.call(model, *args, **kwargs)
+
+    @overload
+    def call(
+        self: "RetryPrompt[P, None]",
+        model: Model | ModelId,
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> RetryResponse[None]: ...
+
+    @overload
+    def call(
+        self: "RetryPrompt[P, FormattableT]",
+        model: Model | ModelId,
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> RetryResponse[FormattableT]: ...
+
+    def call(
+        self, model: Model | ModelId, *args: P.args, **kwargs: P.kwargs
+    ) -> RetryResponse[None] | RetryResponse[FormattableT]:
+        """Generates a retry response using the provided model."""
+        retry_model = _ensure_retry_model(model, self.retry_config)
+        messages = self.messages(*args, **kwargs)
+        return retry_model.call(
+            content=messages, tools=self.toolkit, format=self.format
+        )
+
+    @overload
+    def stream(
+        self: "RetryPrompt[P, None]",
+        model: Model | ModelId,
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> RetryStreamResponse[None]: ...
+
+    @overload
+    def stream(
+        self: "RetryPrompt[P, FormattableT]",
+        model: Model | ModelId,
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> RetryStreamResponse[FormattableT]: ...
+
+    def stream(
+        self, model: Model | ModelId, *args: P.args, **kwargs: P.kwargs
+    ) -> RetryStreamResponse[None] | RetryStreamResponse[FormattableT]:
+        """Generates a retry stream response using the provided model."""
+        retry_model = _ensure_retry_model(model, self.retry_config)
+        messages = self.messages(*args, **kwargs)
+        return retry_model.stream(
+            content=messages, tools=self.toolkit, format=self.format
+        )
+
+
+class AsyncRetryPrompt(
+    BaseRetryPrompt, AsyncPrompt[P, FormattableT], Generic[P, FormattableT]
+):
+    """An async retry-enabled prompt that extends AsyncPrompt and returns AsyncRetryResponse."""
+
+    def __init__(
+        self,
+        *,
+        fn: AsyncMessageTemplate[P],
+        toolkit: AsyncToolkit,
+        format: FormatSpec[FormattableT] | None,
+        retry_config: RetryConfig,
+    ) -> None:
+        super().__init__(fn=fn, toolkit=toolkit, format=format)
+        self.retry_config = retry_config
+
+    @overload
+    async def __call__(
+        self: "AsyncRetryPrompt[P, None]",
+        model: Model | ModelId,
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> AsyncRetryResponse[None]: ...
+
+    @overload
+    async def __call__(
+        self: "AsyncRetryPrompt[P, FormattableT]",
+        model: Model | ModelId,
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> AsyncRetryResponse[FormattableT]: ...
+
+    async def __call__(
+        self, model: Model | ModelId, *args: P.args, **kwargs: P.kwargs
+    ) -> AsyncRetryResponse[None] | AsyncRetryResponse[FormattableT]:
+        """Generates a retry response using the provided model asynchronously."""
+        return await self.call(model, *args, **kwargs)
+
+    @overload
+    async def call(
+        self: "AsyncRetryPrompt[P, None]",
+        model: Model | ModelId,
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> AsyncRetryResponse[None]: ...
+
+    @overload
+    async def call(
+        self: "AsyncRetryPrompt[P, FormattableT]",
+        model: Model | ModelId,
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> AsyncRetryResponse[FormattableT]: ...
+
+    async def call(
+        self, model: Model | ModelId, *args: P.args, **kwargs: P.kwargs
+    ) -> AsyncRetryResponse[None] | AsyncRetryResponse[FormattableT]:
+        """Generates a retry response using the provided model asynchronously."""
+        retry_model = _ensure_retry_model(model, self.retry_config)
+        messages = await self.messages(*args, **kwargs)
+        return await retry_model.call_async(
+            content=messages, tools=self.toolkit, format=self.format
+        )
+
+    @overload
+    async def stream(
+        self: "AsyncRetryPrompt[P, None]",
+        model: Model | ModelId,
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> AsyncRetryStreamResponse[None]: ...
+
+    @overload
+    async def stream(
+        self: "AsyncRetryPrompt[P, FormattableT]",
+        model: Model | ModelId,
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> AsyncRetryStreamResponse[FormattableT]: ...
+
+    async def stream(
+        self, model: Model | ModelId, *args: P.args, **kwargs: P.kwargs
+    ) -> AsyncRetryStreamResponse[None] | AsyncRetryStreamResponse[FormattableT]:
+        """Generates a retry stream response using the provided model asynchronously."""
+        retry_model = _ensure_retry_model(model, self.retry_config)
+        messages = await self.messages(*args, **kwargs)
+        return await retry_model.stream_async(
+            content=messages, tools=self.toolkit, format=self.format
+        )
```
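A note on the file above: `_ensure_retry_model` is what lets every call site accept a bare model-id string, a plain `Model`, or an existing `RetryModel` interchangeably. Below is a hedged illustration of constructing a `RetryPrompt` directly with the `__init__` signature shown above; in practice a decorator in `retry_decorator.py` presumably builds these, and `Toolkit()`/`RetryConfig()` default construction, the template's return type, and the model-id string are all assumptions:

```python
# Hedged sketch: driving RetryPrompt with the signatures shown in the hunk
# above. Toolkit()/RetryConfig() defaults and the model-id string are assumed.
from mirascope.llm.retries.retry_config import RetryConfig
from mirascope.llm.retries.retry_prompts import RetryPrompt
from mirascope.llm.tools import Toolkit


def recommend_book(genre: str) -> str:  # acts as the MessageTemplate fn (return type assumed)
    return f"Recommend a {genre} book."


prompt = RetryPrompt(
    fn=recommend_book,
    toolkit=Toolkit(),           # assumption: default construction
    format=None,                 # no structured-output format
    retry_config=RetryConfig(),  # assumption: default construction
)

# The string is normalized by _ensure_retry_model: promoted to Model(...),
# then wrapped in RetryModel(model=..., retry_config=prompt.retry_config).
response = prompt("openai/gpt-4o-mini", genre="fantasy")
print(type(response).__name__)  # RetryResponse
```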
```diff
--- /dev/null
+++ b/mirascope/llm/retries/retry_responses.py
@@ -0,0 +1,340 @@
+"""Retry response classes that extend base responses with retry metadata."""
+
+from typing import TYPE_CHECKING, Generic, overload
+
+from ..context import Context, DepsT
+from ..formatting import FormattableT
+from ..messages import UserContent
+from ..responses import (
+    AsyncContextResponse,
+    AsyncResponse,
+    ContextResponse,
+    Response,
+)
+from .utils import RetryFailure, get_retry_model_from_context
+
+if TYPE_CHECKING:
+    from .retry_config import RetryConfig
+    from .retry_models import RetryModel
+
+
+class RetryResponse(Response[FormattableT]):
+    """Response that includes retry metadata.
+
+    Extends `Response` directly, copying all attributes from a wrapped response
+    and adding retry configuration. The `model` property returns a `RetryModel`
+    with the successful model as the active model.
+    """
+
+    _retry_model: "RetryModel"
+    """The RetryModel with the successful model as active."""
+
+    retry_failures: list[RetryFailure]
+    """Failed attempts before the successful one (empty if first attempt succeeded)."""
+
+    def __init__(
+        self,
+        response: Response[FormattableT],
+        retry_model: "RetryModel",
+        retry_failures: list[RetryFailure],
+    ) -> None:
+        """Initialize a RetryResponse.
+
+        Args:
+            response: The successful response from the LLM.
+            retry_model: RetryModel with the successful model as active.
+            retry_failures: List of failed attempts before success.
+        """
+        # Copy all attributes from the wrapped response
+        for key, value in response.__dict__.items():
+            object.__setattr__(self, key, value)
+        self._retry_model = retry_model
+        self.retry_failures = retry_failures
+
+    @property
+    def model(self) -> "RetryModel":
+        """A RetryModel with the successful model as active.
+
+        If a model is set in context (via `llm.model()` or `llm.retry_model()`),
+        that model is used instead, wrapped with this response's retry config.
+
+        Otherwise, this RetryModel has the same pool of available models, but with
+        the model that succeeded set as the active model. This means:
+        - `response.model.model_id` equals `response.model_id`
+        - `response.resume()` will try the successful model first
+        """
+        return get_retry_model_from_context(self._retry_model)
+
+    @property
+    def retry_config(self) -> "RetryConfig":
+        """The retry configuration for this response."""
+        return self.model.retry_config
+
+    @overload
+    def resume(
+        self: "RetryResponse[None]", content: UserContent
+    ) -> "RetryResponse[None]": ...
+
+    @overload
+    def resume(
+        self: "RetryResponse[FormattableT]", content: UserContent
+    ) -> "RetryResponse[FormattableT]": ...
+
+    def resume(
+        self, content: UserContent
+    ) -> "RetryResponse[None] | RetryResponse[FormattableT]":
+        """Generate a new RetryResponse using this response's messages with additional user content.
+
+        Args:
+            content: The new user message content to append to the message history.
+
+        Returns:
+            A new RetryResponse instance generated from the extended message history.
+        """
+        return self.model.resume(response=self, content=content)
+
+
+class AsyncRetryResponse(AsyncResponse[FormattableT]):
+    """Async response that includes retry metadata.
+
+    Extends `AsyncResponse` directly, copying all attributes from a wrapped response
+    and adding retry configuration. The `model` property returns a `RetryModel`
+    with the successful model as the active model.
+    """
+
+    _retry_model: "RetryModel"
+    """The RetryModel with the successful model as active."""
+
+    retry_failures: list[RetryFailure]
+    """Failed attempts before the successful one (empty if first attempt succeeded)."""
+
+    def __init__(
+        self,
+        response: AsyncResponse[FormattableT],
+        retry_model: "RetryModel",
+        retry_failures: list[RetryFailure],
+    ) -> None:
+        """Initialize an AsyncRetryResponse.
+
+        Args:
+            response: The successful async response from the LLM.
+            retry_model: RetryModel with the successful model as active.
+            retry_failures: List of failed attempts before success.
+        """
+        # Copy all attributes from the wrapped response
+        for key, value in response.__dict__.items():
+            object.__setattr__(self, key, value)
+        self._retry_model = retry_model
+        self.retry_failures = retry_failures
+
+    @property
+    def model(self) -> "RetryModel":
+        """A RetryModel with the successful model as active.
+
+        If a model is set in context (via `llm.model()` or `llm.retry_model()`),
+        that model is used instead, wrapped with this response's retry config.
+
+        Otherwise, this RetryModel has the same pool of available models, but with
+        the model that succeeded set as the active model. This means:
+        - `response.model.model_id` equals `response.model_id`
+        - `response.resume()` will try the successful model first
+        """
+        return get_retry_model_from_context(self._retry_model)
+
+    @property
+    def retry_config(self) -> "RetryConfig":
+        """The retry configuration for this response."""
+        return self.model.retry_config
+
+    @overload
+    async def resume(
+        self: "AsyncRetryResponse[None]", content: UserContent
+    ) -> "AsyncRetryResponse[None]": ...
+
+    @overload
+    async def resume(
+        self: "AsyncRetryResponse[FormattableT]", content: UserContent
+    ) -> "AsyncRetryResponse[FormattableT]": ...
+
+    async def resume(
+        self, content: UserContent
+    ) -> "AsyncRetryResponse[None] | AsyncRetryResponse[FormattableT]":
+        """Generate a new AsyncRetryResponse using this response's messages with additional user content.
+
+        Args:
+            content: The new user message content to append to the message history.
+
+        Returns:
+            A new AsyncRetryResponse instance generated from the extended message history.
+        """
+        return await self.model.resume_async(response=self, content=content)
+
+
+class ContextRetryResponse(
+    ContextResponse[DepsT, FormattableT], Generic[DepsT, FormattableT]
+):
+    """Context response that includes retry metadata.
+
+    Extends `ContextResponse` directly, copying all attributes from a wrapped response
+    and adding retry configuration. The `model` property returns a `RetryModel`
+    with the successful model as the active model.
+    """
+
+    _retry_model: "RetryModel"
+    """The RetryModel with the successful model as active."""
+
+    retry_failures: list[RetryFailure]
+    """Failed attempts before the successful one (empty if first attempt succeeded)."""
+
+    def __init__(
+        self,
+        response: ContextResponse[DepsT, FormattableT],
+        retry_model: "RetryModel",
+        retry_failures: list[RetryFailure],
+    ) -> None:
+        """Initialize a ContextRetryResponse.
+
+        Args:
+            response: The successful context response from the LLM.
+            retry_model: RetryModel with the successful model as active.
+            retry_failures: List of failed attempts before success.
+        """
+        # Copy all attributes from the wrapped response
+        for key, value in response.__dict__.items():
+            object.__setattr__(self, key, value)
+        self._retry_model = retry_model
+        self.retry_failures = retry_failures
+
+    @property
+    def model(self) -> "RetryModel":
+        """A RetryModel with the successful model as active.
+
+        If a model is set in context (via `llm.model()` or `llm.retry_model()`),
+        that model is used instead, wrapped with this response's retry config.
+
+        Otherwise, this RetryModel has the same pool of available models, but with
+        the model that succeeded set as the active model. This means:
+        - `response.model.model_id` equals `response.model_id`
+        - `response.resume()` will try the successful model first
+        """
+        return get_retry_model_from_context(self._retry_model)
+
+    @property
+    def retry_config(self) -> "RetryConfig":
+        """The retry configuration for this response."""
+        return self.model.retry_config
+
+    @overload
+    def resume(
+        self: "ContextRetryResponse[DepsT, None]",
+        ctx: Context[DepsT],
+        content: UserContent,
+    ) -> "ContextRetryResponse[DepsT, None]": ...
+
+    @overload
+    def resume(
+        self: "ContextRetryResponse[DepsT, FormattableT]",
+        ctx: Context[DepsT],
+        content: UserContent,
+    ) -> "ContextRetryResponse[DepsT, FormattableT]": ...
+
+    def resume(
+        self, ctx: Context[DepsT], content: UserContent
+    ) -> (
+        "ContextRetryResponse[DepsT, None] | ContextRetryResponse[DepsT, FormattableT]"
+    ):
+        """Generate a new ContextRetryResponse using this response's messages with additional user content.
+
+        Args:
+            ctx: A `Context` with the required deps type.
+            content: The new user message content to append to the message history.
+
+        Returns:
+            A new ContextRetryResponse instance generated from the extended message history.
+        """
+        return self.model.context_resume(ctx=ctx, response=self, content=content)
+
+
+class AsyncContextRetryResponse(
+    AsyncContextResponse[DepsT, FormattableT], Generic[DepsT, FormattableT]
+):
+    """Async context response that includes retry metadata.
+
+    Extends `AsyncContextResponse` directly, copying all attributes from a wrapped response
+    and adding retry configuration. The `model` property returns a `RetryModel`
+    with the successful model as the active model.
+    """
+
+    _retry_model: "RetryModel"
+    """The RetryModel with the successful model as active."""
+
+    retry_failures: list[RetryFailure]
+    """Failed attempts before the successful one (empty if first attempt succeeded)."""
+
+    def __init__(
+        self,
+        response: AsyncContextResponse[DepsT, FormattableT],
+        retry_model: "RetryModel",
+        retry_failures: list[RetryFailure],
+    ) -> None:
+        """Initialize an AsyncContextRetryResponse.
+
+        Args:
+            response: The successful async context response from the LLM.
+            retry_model: RetryModel with the successful model as active.
+            retry_failures: List of failed attempts before success.
+        """
+        # Copy all attributes from the wrapped response
+        for key, value in response.__dict__.items():
+            object.__setattr__(self, key, value)
+        self._retry_model = retry_model
+        self.retry_failures = retry_failures
+
+    @property
+    def model(self) -> "RetryModel":
+        """A RetryModel with the successful model as active.
+
+        If a model is set in context (via `llm.model()` or `llm.retry_model()`),
+        that model is used instead, wrapped with this response's retry config.
+
+        Otherwise, this RetryModel has the same pool of available models, but with
+        the model that succeeded set as the active model. This means:
+        - `response.model.model_id` equals `response.model_id`
+        - `response.resume()` will try the successful model first
+        """
+        return get_retry_model_from_context(self._retry_model)
+
+    @property
+    def retry_config(self) -> "RetryConfig":
+        """The retry configuration for this response."""
+        return self.model.retry_config
+
+    @overload
+    async def resume(
+        self: "AsyncContextRetryResponse[DepsT, None]",
+        ctx: Context[DepsT],
+        content: UserContent,
+    ) -> "AsyncContextRetryResponse[DepsT, None]": ...
+
+    @overload
+    async def resume(
+        self: "AsyncContextRetryResponse[DepsT, FormattableT]",
+        ctx: Context[DepsT],
+        content: UserContent,
+    ) -> "AsyncContextRetryResponse[DepsT, FormattableT]": ...
+
+    async def resume(
+        self, ctx: Context[DepsT], content: UserContent
+    ) -> "AsyncContextRetryResponse[DepsT, None] | AsyncContextRetryResponse[DepsT, FormattableT]":
+        """Generate a new AsyncContextRetryResponse using this response's messages with additional user content.
+
+        Args:
+            ctx: A `Context` with the required deps type.
+            content: The new user message content to append to the message history.
+
+        Returns:
+            A new AsyncContextRetryResponse instance generated from the extended message history.
+        """
+        return await self.model.context_resume_async(
+            ctx=ctx, response=self, content=content
+        )
```
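Taken together, the `model` property and `resume` methods above form a continuation pattern: each retry response remembers which model actually succeeded, so a follow-up turn tries that model first while keeping the same fallback pool. A hedged sketch of that flow, where `response` stands in for any `RetryResponse` produced by a retry-enabled call:

```python
# Hedged sketch of the resume flow defined in retry_responses.py above.
from mirascope.llm.retries.retry_responses import RetryResponse


def follow_up(response: RetryResponse) -> RetryResponse:
    # Failed attempts are preserved; the list is empty if the first try worked.
    for failure in response.retry_failures:
        print("failed attempt:", failure)

    # Documented invariant: the RetryModel's active model is the one that
    # produced this response.
    assert response.model.model_id == response.model_id

    # resume() replays the message history plus new user content, trying
    # the previously successful model first.
    return response.resume("Please elaborate on your last answer.")
```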