mirascope 2.1.0__py3-none-any.whl → 2.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. mirascope/api/_generated/functions/client.py +10 -0
  2. mirascope/api/_generated/functions/raw_client.py +8 -0
  3. mirascope/api/_generated/functions/types/functions_create_response.py +25 -8
  4. mirascope/api/_generated/functions/types/functions_find_by_hash_response.py +25 -10
  5. mirascope/api/_generated/functions/types/functions_get_by_env_response.py +1 -0
  6. mirascope/api/_generated/functions/types/functions_get_response.py +25 -8
  7. mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +1 -0
  8. mirascope/api/_generated/functions/types/functions_list_response_functions_item.py +22 -7
  9. mirascope/api/_generated/reference.md +9 -0
  10. mirascope/llm/__init__.py +42 -0
  11. mirascope/llm/calls/calls.py +38 -11
  12. mirascope/llm/exceptions.py +69 -0
  13. mirascope/llm/prompts/prompts.py +47 -9
  14. mirascope/llm/providers/__init__.py +3 -0
  15. mirascope/llm/providers/openai/completions/_utils/__init__.py +3 -0
  16. mirascope/llm/providers/openai/completions/_utils/encode.py +27 -32
  17. mirascope/llm/providers/openai/completions/_utils/feature_info.py +50 -0
  18. mirascope/llm/providers/openai/completions/base_provider.py +21 -0
  19. mirascope/llm/providers/openai/completions/provider.py +8 -2
  20. mirascope/llm/providers/openrouter/__init__.py +5 -0
  21. mirascope/llm/providers/openrouter/provider.py +67 -0
  22. mirascope/llm/providers/provider_id.py +2 -0
  23. mirascope/llm/providers/provider_registry.py +6 -0
  24. mirascope/llm/responses/response.py +217 -0
  25. mirascope/llm/responses/stream_response.py +234 -0
  26. mirascope/llm/retries/__init__.py +51 -0
  27. mirascope/llm/retries/retry_calls.py +159 -0
  28. mirascope/llm/retries/retry_config.py +168 -0
  29. mirascope/llm/retries/retry_decorator.py +258 -0
  30. mirascope/llm/retries/retry_models.py +1313 -0
  31. mirascope/llm/retries/retry_prompts.py +227 -0
  32. mirascope/llm/retries/retry_responses.py +340 -0
  33. mirascope/llm/retries/retry_stream_responses.py +571 -0
  34. mirascope/llm/retries/utils.py +159 -0
  35. mirascope/ops/_internal/versioned_calls.py +249 -9
  36. mirascope/ops/_internal/versioned_functions.py +2 -0
  37. {mirascope-2.1.0.dist-info → mirascope-2.2.0.dist-info}/METADATA +1 -1
  38. {mirascope-2.1.0.dist-info → mirascope-2.2.0.dist-info}/RECORD +40 -28
  39. {mirascope-2.1.0.dist-info → mirascope-2.2.0.dist-info}/WHEEL +0 -0
  40. {mirascope-2.1.0.dist-info → mirascope-2.2.0.dist-info}/licenses/LICENSE +0 -0
mirascope/llm/retries/retry_stream_responses.py (new file)
@@ -0,0 +1,571 @@
+ """Retry stream response wrappers that add retry capabilities to streaming LLM responses."""
+
+ from collections.abc import AsyncIterator, Awaitable, Callable, Iterator
+ from typing import TYPE_CHECKING, Generic, overload
+
+ from ..content import (
+     AssistantContentChunk,
+ )
+ from ..context import Context, DepsT
+ from ..exceptions import RetriesExhausted, StreamRestarted
+ from ..formatting import FormattableT
+ from ..messages import UserContent
+ from ..models import Model
+ from ..responses import (
+     AsyncContextStreamResponse,
+     AsyncStreamResponse,
+     ContextStreamResponse,
+     StreamResponse,
+ )
+ from .utils import RetryFailure, get_retry_model_from_context
+
+ if TYPE_CHECKING:
+     from .retry_config import RetryConfig
+     from .retry_models import RetryModel
+
+
+ class RetryStreamResponse(StreamResponse[FormattableT]):
+     """A streaming response wrapper that includes retry capabilities.
+
+     Extends `StreamResponse` directly, copying all attributes from a wrapped response
+     and adding retry configuration.
+
+     This wraps a `StreamResponse` and adds automatic retry behavior when
+     retryable errors occur during iteration. When a retry happens, a
+     `StreamRestarted` exception is raised so the user can handle the restart
+     (e.g., clear terminal output) before re-iterating.
+
+     Supports fallback models - when the active model exhausts its retries,
+     the next fallback model is tried.
+
+     Example:
+         ```python
+         response = retry_model.stream("Tell me a story")
+
+         while True:
+             try:
+                 for chunk in response.text_stream():
+                     print(chunk, end="", flush=True)
+                 break  # Success
+             except llm.StreamRestarted as e:
+                 print(e.message)
+         ```
+     """
+
+     _current_variant: "RetryModel"
+     """The current RetryModel variant being used."""
+
+     _variants_iter: Iterator["RetryModel"]
+     """Iterator over model variants with backoff delays."""
+
+     retry_failures: list[RetryFailure]
+     """Failed attempts before success (empty if first attempt succeeded)."""
+
+     _stream_fn: Callable[[Model], StreamResponse[FormattableT]]
+
+     def __init__(
+         self,
+         retry_model: "RetryModel",
+         stream_fn: Callable[[Model], StreamResponse[FormattableT]],
+     ) -> None:
+         """Initialize a RetryStreamResponse.
+
+         Args:
+             retry_model: The RetryModel providing retry configuration.
+             stream_fn: Function that creates a stream from a Model.
+         """
+         self._variants_iter = retry_model.variants()
+         self._current_variant = next(self._variants_iter)
+         self._stream_fn = stream_fn
+         self.retry_failures = []
+
+         # Create the initial stream and copy all attributes
+         initial_stream = stream_fn(self._current_variant.get_active_model())
+         for key, value in initial_stream.__dict__.items():
+             object.__setattr__(self, key, value)
+
+     @property
+     def model(self) -> "RetryModel":
+         """A RetryModel with parameters matching this response.
+
+         If a model is set in context (via `llm.model()` or `llm.retry_model()`),
+         that model is used instead, wrapped with this response's retry config.
+         """
+         return get_retry_model_from_context(self._current_variant)
+
+     @property
+     def retry_config(self) -> "RetryConfig":
+         """The retry configuration for this response."""
+         return self.model.retry_config
+
+     def _reset_stream(self, variant: "RetryModel") -> None:
+         """Reset to a fresh stream for a new retry attempt."""
+         new_stream = self._stream_fn(variant.get_active_model())
+         # Copy all attributes from the new stream
+         for key, value in new_stream.__dict__.items():
+             object.__setattr__(self, key, value)
+
+     def chunk_stream(self) -> Iterator[AssistantContentChunk]:
+         """Returns an iterator that yields content chunks with retry support.
+
+         If a retryable error occurs during iteration, the stream is reset
+         and a `StreamRestarted` exception is raised. The user should catch
+         this exception and re-iterate to continue from the new attempt.
+
+         Raises:
+             StreamRestarted: When the stream is reset for a retry attempt.
+             Exception: The underlying error if max retries are exhausted.
+         """
+         config = self._current_variant.retry_config
+         try:
+             yield from super().chunk_stream()
+         except config.retry_on as e:
+             failure = RetryFailure(
+                 model=self._current_variant.get_active_model(), exception=e
+             )
+             self.retry_failures.append(failure)
+
+             # Try to get next variant (handles backoff and fallback)
+             try:
+                 self._current_variant = next(self._variants_iter)
+             except StopIteration:
+                 raise RetriesExhausted(self.retry_failures) from None
+
+             self._reset_stream(self._current_variant)
+
+             raise StreamRestarted(
+                 failure=failure,
+             ) from e
+
+     @overload
+     def resume(
+         self: "RetryStreamResponse[None]", content: UserContent
+     ) -> "RetryStreamResponse[None]": ...
+
+     @overload
+     def resume(
+         self: "RetryStreamResponse[FormattableT]", content: UserContent
+     ) -> "RetryStreamResponse[FormattableT]": ...
+
+     def resume(
+         self, content: UserContent
+     ) -> "RetryStreamResponse[None] | RetryStreamResponse[FormattableT]":
+         """Generate a new RetryStreamResponse using this response's messages with additional user content."""
+         return self.model.resume_stream(response=self, content=content)
+
+
+ class AsyncRetryStreamResponse(AsyncStreamResponse[FormattableT]):
+     """An async streaming response wrapper that includes retry capabilities.
+
+     Extends `AsyncStreamResponse` directly, copying all attributes from a wrapped response
+     and adding retry configuration.
+
+     This wraps an `AsyncStreamResponse` and adds automatic retry behavior when
+     retryable errors occur during iteration. When a retry happens, a
+     `StreamRestarted` exception is raised so the user can handle the restart
+     (e.g., clear terminal output) before re-iterating.
+
+     Supports fallback models - when the active model exhausts its retries,
+     the next fallback model is tried.
+
+     Example:
+         ```python
+         response = await retry_model.stream_async("Tell me a story")
+
+         while True:
+             try:
+                 async for chunk in response.text_stream():
+                     print(chunk, end="", flush=True)
+                 break  # Success
+             except llm.StreamRestarted as e:
+                 print(e.message)
+         ```
+     """
+
+     _current_variant: "RetryModel"
+     """The current RetryModel variant being used."""
+
+     _variants_iter: AsyncIterator["RetryModel"]
+     """Async iterator over model variants with backoff delays."""
+
+     retry_failures: list[RetryFailure]
+     """Failed attempts before success (empty if first attempt succeeded)."""
+
+     _stream_fn: Callable[[Model], Awaitable[AsyncStreamResponse[FormattableT]]]
+
+     def __init__(
+         self,
+         stream_fn: Callable[[Model], Awaitable[AsyncStreamResponse[FormattableT]]],
+         initial_stream: AsyncStreamResponse[FormattableT],
+         initial_variant: "RetryModel",
+         variants_iter: AsyncIterator["RetryModel"],
+     ) -> None:
+         """Initialize an AsyncRetryStreamResponse.
+
+         Args:
+             stream_fn: Async function that creates a stream from a Model.
+             initial_stream: The pre-awaited initial stream.
+             initial_variant: The first variant from the iterator.
+             variants_iter: The async iterator for remaining variants.
+         """
+         # Copy all attributes from the initial stream
+         for key, value in initial_stream.__dict__.items():
+             object.__setattr__(self, key, value)
+
+         self._current_variant = initial_variant
+         self._variants_iter = variants_iter
+         self._stream_fn = stream_fn
+         self.retry_failures = []
+
+     @property
+     def model(self) -> "RetryModel":
+         """A RetryModel with parameters matching this response.
+
+         If a model is set in context (via `llm.model()` or `llm.retry_model()`),
+         that model is used instead, wrapped with this response's retry config.
+         """
+         return get_retry_model_from_context(self._current_variant)
+
+     @property
+     def retry_config(self) -> "RetryConfig":
+         """The retry configuration for this response."""
+         return self.model.retry_config
+
+     async def _reset_stream(self, variant: "RetryModel") -> None:
+         """Reset to a fresh stream for a new retry attempt."""
+         new_stream = await self._stream_fn(variant.get_active_model())
+         # Copy all attributes from the new stream
+         for key, value in new_stream.__dict__.items():
+             object.__setattr__(self, key, value)
+
+     async def chunk_stream(self) -> AsyncIterator[AssistantContentChunk]:
+         """Returns an async iterator that yields content chunks with retry support.
+
+         If a retryable error occurs during iteration, the stream is reset
+         and a `StreamRestarted` exception is raised. The user should catch
+         this exception and re-iterate to continue from the new attempt.
+
+         Raises:
+             StreamRestarted: When the stream is reset for a retry attempt.
+             Exception: The underlying error if max retries are exhausted.
+         """
+         config = self._current_variant.retry_config
+         try:
+             async for chunk in super().chunk_stream():
+                 yield chunk
+         except config.retry_on as e:
+             failure = RetryFailure(
+                 model=self._current_variant.get_active_model(), exception=e
+             )
+             self.retry_failures.append(failure)
+
+             # Try to get next variant (handles backoff and fallback)
+             try:
+                 self._current_variant = await anext(self._variants_iter)
+             except StopAsyncIteration:
+                 raise RetriesExhausted(self.retry_failures) from None
+
+             await self._reset_stream(self._current_variant)
+
+             raise StreamRestarted(
+                 failure=failure,
+             ) from e
+
+     @overload
+     async def resume(
+         self: "AsyncRetryStreamResponse[None]", content: UserContent
+     ) -> "AsyncRetryStreamResponse[None]": ...
+
+     @overload
+     async def resume(
+         self: "AsyncRetryStreamResponse[FormattableT]", content: UserContent
+     ) -> "AsyncRetryStreamResponse[FormattableT]": ...
+
+     async def resume(
+         self, content: UserContent
+     ) -> "AsyncRetryStreamResponse[None] | AsyncRetryStreamResponse[FormattableT]":
+         """Generate a new AsyncRetryStreamResponse using this response's messages with additional user content."""
+         return await self.model.resume_stream_async(response=self, content=content)
+
+
+ class ContextRetryStreamResponse(
+     ContextStreamResponse[DepsT, FormattableT], Generic[DepsT, FormattableT]
+ ):
+     """A context-aware streaming response wrapper that includes retry capabilities.
+
+     Extends `ContextStreamResponse` directly, copying all attributes from a wrapped response
+     and adding retry configuration.
+
+     This wraps a `ContextStreamResponse` and adds automatic retry behavior when
+     retryable errors occur during iteration. When a retry happens, a
+     `StreamRestarted` exception is raised so the user can handle the restart
+     (e.g., clear terminal output) before re-iterating.
+
+     Supports fallback models - when the active model exhausts its retries,
+     the next fallback model is tried.
+
+     Example:
+         ```python
+         response = retry_model.context_stream("Tell me a story", ctx=ctx)
+
+         while True:
+             try:
+                 for chunk in response.text_stream():
+                     print(chunk, end="", flush=True)
+                 break  # Success
+             except llm.StreamRestarted as e:
+                 print(e.message)
+         ```
+     """
+
+     _current_variant: "RetryModel"
+     """The current RetryModel variant being used."""
+
+     _variants_iter: Iterator["RetryModel"]
+     """Iterator over model variants with backoff delays."""
+
+     retry_failures: list[RetryFailure]
+     """Failed attempts before success (empty if first attempt succeeded)."""
+
+     _stream_fn: Callable[[Model], ContextStreamResponse[DepsT, FormattableT]]
+
+     def __init__(
+         self,
+         retry_model: "RetryModel",
+         stream_fn: Callable[[Model], ContextStreamResponse[DepsT, FormattableT]],
+     ) -> None:
+         """Initialize a ContextRetryStreamResponse.
+
+         Args:
+             retry_model: The RetryModel providing retry configuration.
+             stream_fn: Function that creates a stream from a Model.
+         """
+         self._variants_iter = retry_model.variants()
+         self._current_variant = next(self._variants_iter)
+         self._stream_fn = stream_fn
+         self.retry_failures = []
+
+         # Create the initial stream and copy all attributes
+         initial_stream = stream_fn(self._current_variant.get_active_model())
+         for key, value in initial_stream.__dict__.items():
+             object.__setattr__(self, key, value)
+
+     @property
+     def model(self) -> "RetryModel":
+         """A RetryModel with parameters matching this response.
+
+         If a model is set in context (via `llm.model()` or `llm.retry_model()`),
+         that model is used instead, wrapped with this response's retry config.
+         """
+         return get_retry_model_from_context(self._current_variant)
+
+     @property
+     def retry_config(self) -> "RetryConfig":
+         """The retry configuration for this response."""
+         return self.model.retry_config
+
+     def _reset_stream(self, variant: "RetryModel") -> None:
+         """Reset to a fresh stream for a new retry attempt."""
+         new_stream = self._stream_fn(variant.get_active_model())
+         # Copy all attributes from the new stream
+         for key, value in new_stream.__dict__.items():
+             object.__setattr__(self, key, value)
+
+     def chunk_stream(self) -> Iterator[AssistantContentChunk]:
+         """Returns an iterator that yields content chunks with retry support.
+
+         If a retryable error occurs during iteration, the stream is reset
+         and a `StreamRestarted` exception is raised. The user should catch
+         this exception and re-iterate to continue from the new attempt.
+
+         Raises:
+             StreamRestarted: When the stream is reset for a retry attempt.
+             Exception: The underlying error if max retries are exhausted.
+         """
+         config = self._current_variant.retry_config
+         try:
+             yield from super().chunk_stream()
+         except config.retry_on as e:
+             failure = RetryFailure(
+                 model=self._current_variant.get_active_model(), exception=e
+             )
+             self.retry_failures.append(failure)
+
+             # Try to get next variant (handles backoff and fallback)
+             try:
+                 self._current_variant = next(self._variants_iter)
+             except StopIteration:
+                 raise RetriesExhausted(self.retry_failures) from None
+
+             self._reset_stream(self._current_variant)
+
+             raise StreamRestarted(
+                 failure=failure,
+             ) from e
+
+     @overload
+     def resume(
+         self: "ContextRetryStreamResponse[DepsT, None]",
+         ctx: Context[DepsT],
+         content: UserContent,
+     ) -> "ContextRetryStreamResponse[DepsT, None]": ...
+
+     @overload
+     def resume(
+         self: "ContextRetryStreamResponse[DepsT, FormattableT]",
+         ctx: Context[DepsT],
+         content: UserContent,
+     ) -> "ContextRetryStreamResponse[DepsT, FormattableT]": ...
+
+     def resume(
+         self, ctx: Context[DepsT], content: UserContent
+     ) -> "ContextRetryStreamResponse[DepsT, None] | ContextRetryStreamResponse[DepsT, FormattableT]":
+         """Generate a new ContextRetryStreamResponse using this response's messages with additional user content."""
+         return self.model.context_resume_stream(ctx=ctx, response=self, content=content)
+
+
+ class AsyncContextRetryStreamResponse(
+     AsyncContextStreamResponse[DepsT, FormattableT], Generic[DepsT, FormattableT]
+ ):
+     """An async context-aware streaming response wrapper that includes retry capabilities.
+
+     Extends `AsyncContextStreamResponse` directly, copying all attributes from a wrapped response
+     and adding retry configuration.
+
+     This wraps an `AsyncContextStreamResponse` and adds automatic retry behavior when
+     retryable errors occur during iteration. When a retry happens, a
+     `StreamRestarted` exception is raised so the user can handle the restart
+     (e.g., clear terminal output) before re-iterating.
+
+     Supports fallback models - when the active model exhausts its retries,
+     the next fallback model is tried.
+
+     Example:
+         ```python
+         ctx = llm.Context(deps=my_deps)
+         response = await retry_model.context_stream_async("Tell me a story", ctx=ctx)
+
+         while True:
+             try:
+                 async for chunk in response.text_stream():
+                     print(chunk, end="", flush=True)
+                 break  # Success
+             except llm.StreamRestarted as e:
+                 print(e.message)
+         ```
+     """
+
+     _current_variant: "RetryModel"
+     """The current RetryModel variant being used."""
+
+     _variants_iter: AsyncIterator["RetryModel"]
+     """Async iterator over model variants with backoff delays."""
+
+     retry_failures: list[RetryFailure]
+     """Failed attempts before success (empty if first attempt succeeded)."""
+
+     _stream_fn: Callable[
+         [Model], Awaitable[AsyncContextStreamResponse[DepsT, FormattableT]]
+     ]
+
+     def __init__(
+         self,
+         stream_fn: Callable[
+             [Model], Awaitable[AsyncContextStreamResponse[DepsT, FormattableT]]
+         ],
+         initial_stream: AsyncContextStreamResponse[DepsT, FormattableT],
+         initial_variant: "RetryModel",
+         variants_iter: AsyncIterator["RetryModel"],
+     ) -> None:
+         """Initialize an AsyncContextRetryStreamResponse.
+
+         Args:
+             stream_fn: Async function that creates a stream from a Model.
+             initial_stream: The pre-awaited initial stream.
+             initial_variant: The first variant from the iterator.
+             variants_iter: The async iterator for remaining variants.
+         """
+         # Copy all attributes from the initial stream
+         for key, value in initial_stream.__dict__.items():
+             object.__setattr__(self, key, value)
+
+         self._current_variant = initial_variant
+         self._variants_iter = variants_iter
+         self._stream_fn = stream_fn
+         self.retry_failures = []
+
+     @property
+     def model(self) -> "RetryModel":
+         """A RetryModel with parameters matching this response.
+
+         If a model is set in context (via `llm.model()` or `llm.retry_model()`),
+         that model is used instead, wrapped with this response's retry config.
+         """
+         return get_retry_model_from_context(self._current_variant)
+
+     @property
+     def retry_config(self) -> "RetryConfig":
+         """The retry configuration for this response."""
+         return self.model.retry_config
+
+     async def _reset_stream(self, variant: "RetryModel") -> None:
+         """Reset to a fresh stream for a new retry attempt."""
+         new_stream = await self._stream_fn(variant.get_active_model())
+         # Copy all attributes from the new stream
+         for key, value in new_stream.__dict__.items():
+             object.__setattr__(self, key, value)
+
+     async def chunk_stream(self) -> AsyncIterator[AssistantContentChunk]:
+         """Returns an async iterator that yields content chunks with retry support.
+
+         If a retryable error occurs during iteration, the stream is reset
+         and a `StreamRestarted` exception is raised. The user should catch
+         this exception and re-iterate to continue from the new attempt.
+
+         Raises:
+             StreamRestarted: When the stream is reset for a retry attempt.
+             Exception: The underlying error if max retries are exhausted.
+         """
+         config = self._current_variant.retry_config
+         try:
+             async for chunk in super().chunk_stream():
+                 yield chunk
+         except config.retry_on as e:
+             failure = RetryFailure(
+                 model=self._current_variant.get_active_model(), exception=e
+             )
+             self.retry_failures.append(failure)
+
+             # Try to get next variant (handles backoff and fallback)
+             try:
+                 self._current_variant = await anext(self._variants_iter)
+             except StopAsyncIteration:
+                 raise RetriesExhausted(self.retry_failures) from None
+
+             await self._reset_stream(self._current_variant)
+
+             raise StreamRestarted(
+                 failure=failure,
+             ) from e
+
+     @overload
+     async def resume(
+         self: "AsyncContextRetryStreamResponse[DepsT, None]",
+         ctx: Context[DepsT],
+         content: UserContent,
+     ) -> "AsyncContextRetryStreamResponse[DepsT, None]": ...
+
+     @overload
+     async def resume(
+         self: "AsyncContextRetryStreamResponse[DepsT, FormattableT]",
+         ctx: Context[DepsT],
+         content: UserContent,
+     ) -> "AsyncContextRetryStreamResponse[DepsT, FormattableT]": ...
+
+     async def resume(
+         self, ctx: Context[DepsT], content: UserContent
+     ) -> "AsyncContextRetryStreamResponse[DepsT, None] | AsyncContextRetryStreamResponse[DepsT, FormattableT]":
+         """Generate a new AsyncContextRetryStreamResponse using this response's messages with additional user content."""
+         return await self.model.context_resume_stream_async(
+             ctx=ctx, response=self, content=content
+         )
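
All four classes in this new file implement the same restart loop their docstrings describe: iterate the stream, catch `StreamRestarted` to discard partial output and re-iterate, and treat `RetriesExhausted` as the final failure. A minimal consumer sketch assembled from those docstring examples follows; the `llm.retry_model(...)` construction, the model id string, and the assumption that `RetriesExhausted` is exported from `mirascope.llm` alongside `StreamRestarted` are not shown in this file and are illustrative only:

```python
from mirascope import llm

# Hypothetical construction: this file only references `llm.retry_model()`,
# not its signature or the model id format.
retry_model = llm.retry_model("openai:gpt-4o-mini")

response = retry_model.stream("Tell me a story")

while True:
    try:
        for chunk in response.text_stream():
            print(chunk, end="", flush=True)
        break  # stream finished without a retryable error
    except llm.StreamRestarted as e:
        # A retryable error reset the stream to a fresh attempt (possibly on
        # a fallback model); clear any partial output and iterate again.
        print(f"\n-- restarted: {e.message}")
    except llm.RetriesExhausted as e:
        # Every variant (including fallbacks) failed; the accumulated
        # RetryFailure records remain available on response.retry_failures.
        print(f"\n-- giving up: {e}")
        break
```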