mirascope 2.1.0__py3-none-any.whl → 2.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. mirascope/api/_generated/functions/client.py +10 -0
  2. mirascope/api/_generated/functions/raw_client.py +8 -0
  3. mirascope/api/_generated/functions/types/functions_create_response.py +25 -8
  4. mirascope/api/_generated/functions/types/functions_find_by_hash_response.py +25 -10
  5. mirascope/api/_generated/functions/types/functions_get_by_env_response.py +1 -0
  6. mirascope/api/_generated/functions/types/functions_get_response.py +25 -8
  7. mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +1 -0
  8. mirascope/api/_generated/functions/types/functions_list_response_functions_item.py +22 -7
  9. mirascope/api/_generated/reference.md +9 -0
  10. mirascope/llm/__init__.py +42 -0
  11. mirascope/llm/calls/calls.py +38 -11
  12. mirascope/llm/exceptions.py +69 -0
  13. mirascope/llm/prompts/prompts.py +47 -9
  14. mirascope/llm/providers/__init__.py +3 -0
  15. mirascope/llm/providers/openai/completions/_utils/__init__.py +3 -0
  16. mirascope/llm/providers/openai/completions/_utils/encode.py +27 -32
  17. mirascope/llm/providers/openai/completions/_utils/feature_info.py +50 -0
  18. mirascope/llm/providers/openai/completions/base_provider.py +21 -0
  19. mirascope/llm/providers/openai/completions/provider.py +8 -2
  20. mirascope/llm/providers/openrouter/__init__.py +5 -0
  21. mirascope/llm/providers/openrouter/provider.py +67 -0
  22. mirascope/llm/providers/provider_id.py +2 -0
  23. mirascope/llm/providers/provider_registry.py +6 -0
  24. mirascope/llm/responses/response.py +217 -0
  25. mirascope/llm/responses/stream_response.py +234 -0
  26. mirascope/llm/retries/__init__.py +51 -0
  27. mirascope/llm/retries/retry_calls.py +159 -0
  28. mirascope/llm/retries/retry_config.py +168 -0
  29. mirascope/llm/retries/retry_decorator.py +258 -0
  30. mirascope/llm/retries/retry_models.py +1313 -0
  31. mirascope/llm/retries/retry_prompts.py +227 -0
  32. mirascope/llm/retries/retry_responses.py +340 -0
  33. mirascope/llm/retries/retry_stream_responses.py +571 -0
  34. mirascope/llm/retries/utils.py +159 -0
  35. mirascope/ops/_internal/versioned_calls.py +249 -9
  36. mirascope/ops/_internal/versioned_functions.py +2 -0
  37. {mirascope-2.1.0.dist-info → mirascope-2.2.0.dist-info}/METADATA +1 -1
  38. {mirascope-2.1.0.dist-info → mirascope-2.2.0.dist-info}/RECORD +40 -28
  39. {mirascope-2.1.0.dist-info → mirascope-2.2.0.dist-info}/WHEEL +0 -0
  40. {mirascope-2.1.0.dist-info → mirascope-2.2.0.dist-info}/licenses/LICENSE +0 -0
@@ -6,6 +6,7 @@ from typing import TYPE_CHECKING, Any, Generic, overload
6
6
 
7
7
  from ..content import ToolOutput
8
8
  from ..context import Context, DepsT
9
+ from ..exceptions import ParseError
9
10
  from ..formatting import Format, FormattableT
10
11
  from ..messages import AssistantMessage, Message, UserContent
11
12
  from ..tools import (
@@ -100,6 +101,56 @@ class Response(BaseResponse[Toolkit, FormattableT]):
100
101
  content=content,
101
102
  )
102
103
 
104
+ @overload
105
+ def validate(
106
+ self: "Response[None]", max_retries: int = 1
107
+ ) -> tuple[None, "Response[None]"]: ...
108
+
109
+ @overload
110
+ def validate(
111
+ self: "Response[FormattableT]", max_retries: int = 1
112
+ ) -> tuple[FormattableT, "Response[FormattableT]"]: ...
113
+
114
+ def validate(
115
+ self, max_retries: int = 1
116
+ ) -> tuple[FormattableT | None, "Response[FormattableT] | Response[None]"]:
117
+ """Parse and validate the response, retrying on parse errors.
118
+
119
+ Attempts to parse the response. On `ParseError`, asks the LLM to fix its
120
+ output by resuming with the error message. Returns both the parsed value
121
+ and the (potentially updated) response.
122
+
123
+ Args:
124
+ max_retries: Maximum number of retry attempts on parse failure.
125
+ Defaults to 1 (2 total attempts). Must be non-negative.
126
+
127
+ Returns:
128
+ A tuple of (parsed_value, response). If parsing succeeded on the first
129
+ attempt, returns (value, self). If retries were needed, returns
130
+ (value, new_response) where new_response is the final successful response.
131
+
132
+ Raises:
133
+ ValueError: If max_retries is negative.
134
+ ParseError: If parsing fails after exhausting all retry attempts.
135
+ Error: If the LLM call fails while generating a retry response.
136
+ """
137
+ if max_retries < 0:
138
+ raise ValueError("max_retries must be non-negative")
139
+
140
+ if self.format is None:
141
+ return None, self
142
+
143
+ current_response: Response[FormattableT] = self
144
+ for attempt in range(max_retries + 1):
145
+ try:
146
+ return current_response.parse(), current_response
147
+ except ParseError as e:
148
+ if attempt == max_retries:
149
+ raise
150
+ current_response = current_response.resume(e.retry_message())
151
+
152
+ raise AssertionError("Unreachable") # pragma: no cover
153
+
103
154
 
104
155
  class AsyncResponse(BaseResponse[AsyncToolkit, FormattableT]):
105
156
  """The response generated by an LLM in async mode."""
@@ -180,6 +231,58 @@ class AsyncResponse(BaseResponse[AsyncToolkit, FormattableT]):
180
231
  content=content,
181
232
  )
182
233
 
234
+ @overload
235
+ async def validate(
236
+ self: "AsyncResponse[None]", max_retries: int = 1
237
+ ) -> tuple[None, "AsyncResponse[None]"]: ...
238
+
239
+ @overload
240
+ async def validate(
241
+ self: "AsyncResponse[FormattableT]", max_retries: int = 1
242
+ ) -> tuple[FormattableT, "AsyncResponse[FormattableT]"]: ...
243
+
244
+ async def validate(
245
+ self, max_retries: int = 1
246
+ ) -> tuple[
247
+ FormattableT | None, "AsyncResponse[FormattableT] | AsyncResponse[None]"
248
+ ]:
249
+ """Parse and validate the response, retrying on parse errors.
250
+
251
+ Attempts to parse the response. On `ParseError`, asks the LLM to fix its
252
+ output by resuming with the error message. Returns both the parsed value
253
+ and the (potentially updated) response.
254
+
255
+ Args:
256
+ max_retries: Maximum number of retry attempts on parse failure.
257
+ Defaults to 1 (2 total attempts). Must be non-negative.
258
+
259
+ Returns:
260
+ A tuple of (parsed_value, response). If parsing succeeded on the first
261
+ attempt, returns (value, self). If retries were needed, returns
262
+ (value, new_response) where new_response is the final successful response.
263
+
264
+ Raises:
265
+ ValueError: If max_retries is negative.
266
+ ParseError: If parsing fails after exhausting all retry attempts.
267
+ Error: If the LLM call fails while generating a retry response.
268
+ """
269
+ if max_retries < 0:
270
+ raise ValueError("max_retries must be non-negative")
271
+
272
+ if self.format is None:
273
+ return None, self
274
+
275
+ current_response: AsyncResponse[FormattableT] = self
276
+ for attempt in range(max_retries + 1):
277
+ try:
278
+ return current_response.parse(), current_response
279
+ except ParseError as e:
280
+ if attempt == max_retries:
281
+ raise
282
+ current_response = await current_response.resume(e.retry_message())
283
+
284
+ raise AssertionError("Unreachable") # pragma: no cover
285
+
183
286
 
184
287
  class ContextResponse(
185
288
  BaseResponse[ContextToolkit[DepsT], FormattableT], Generic[DepsT, FormattableT]
@@ -268,6 +371,62 @@ class ContextResponse(
268
371
  content=content,
269
372
  )
270
373
 
374
+ @overload
375
+ def validate(
376
+ self: "ContextResponse[DepsT, None]", ctx: Context[DepsT], max_retries: int = 1
377
+ ) -> tuple[None, "ContextResponse[DepsT, None]"]: ...
378
+
379
+ @overload
380
+ def validate(
381
+ self: "ContextResponse[DepsT, FormattableT]",
382
+ ctx: Context[DepsT],
383
+ max_retries: int = 1,
384
+ ) -> tuple[FormattableT, "ContextResponse[DepsT, FormattableT]"]: ...
385
+
386
+ def validate(
387
+ self, ctx: Context[DepsT], max_retries: int = 1
388
+ ) -> tuple[
389
+ FormattableT | None,
390
+ "ContextResponse[DepsT, FormattableT] | ContextResponse[DepsT, None]",
391
+ ]:
392
+ """Parse and validate the response, retrying on parse errors.
393
+
394
+ Attempts to parse the response. On `ParseError`, asks the LLM to fix its
395
+ output by resuming with the error message. Returns both the parsed value
396
+ and the (potentially updated) response.
397
+
398
+ Args:
399
+ ctx: A `Context` with the required deps type.
400
+ max_retries: Maximum number of retry attempts on parse failure.
401
+ Defaults to 1 (2 total attempts). Must be non-negative.
402
+
403
+ Returns:
404
+ A tuple of (parsed_value, response). If parsing succeeded on the first
405
+ attempt, returns (value, self). If retries were needed, returns
406
+ (value, new_response) where new_response is the final successful response.
407
+
408
+ Raises:
409
+ ValueError: If max_retries is negative.
410
+ ParseError: If parsing fails after exhausting all retry attempts.
411
+ Error: If the LLM call fails while generating a retry response.
412
+ """
413
+ if max_retries < 0:
414
+ raise ValueError("max_retries must be non-negative")
415
+
416
+ if self.format is None:
417
+ return None, self
418
+
419
+ current_response: ContextResponse[DepsT, FormattableT] = self
420
+ for attempt in range(max_retries + 1):
421
+ try:
422
+ return current_response.parse(), current_response
423
+ except ParseError as e:
424
+ if attempt == max_retries:
425
+ raise
426
+ current_response = current_response.resume(ctx, e.retry_message())
427
+
428
+ raise AssertionError("Unreachable") # pragma: no cover
429
+
271
430
 
272
431
  class AsyncContextResponse(
273
432
  BaseResponse[AsyncContextToolkit[DepsT], FormattableT], Generic[DepsT, FormattableT]
@@ -360,3 +519,61 @@ class AsyncContextResponse(
360
519
  response=self,
361
520
  content=content,
362
521
  )
522
+
523
+ @overload
524
+ async def validate(
525
+ self: "AsyncContextResponse[DepsT, None]",
526
+ ctx: Context[DepsT],
527
+ max_retries: int = 1,
528
+ ) -> tuple[None, "AsyncContextResponse[DepsT, None]"]: ...
529
+
530
+ @overload
531
+ async def validate(
532
+ self: "AsyncContextResponse[DepsT, FormattableT]",
533
+ ctx: Context[DepsT],
534
+ max_retries: int = 1,
535
+ ) -> tuple[FormattableT, "AsyncContextResponse[DepsT, FormattableT]"]: ...
536
+
537
+ async def validate(
538
+ self, ctx: Context[DepsT], max_retries: int = 1
539
+ ) -> tuple[
540
+ FormattableT | None,
541
+ "AsyncContextResponse[DepsT, FormattableT] | AsyncContextResponse[DepsT, None]",
542
+ ]:
543
+ """Parse and validate the response, retrying on parse errors.
544
+
545
+ Attempts to parse the response. On `ParseError`, asks the LLM to fix its
546
+ output by resuming with the error message. Returns both the parsed value
547
+ and the (potentially updated) response.
548
+
549
+ Args:
550
+ ctx: A `Context` with the required deps type.
551
+ max_retries: Maximum number of retry attempts on parse failure.
552
+ Defaults to 1 (2 total attempts). Must be non-negative.
553
+
554
+ Returns:
555
+ A tuple of (parsed_value, response). If parsing succeeded on the first
556
+ attempt, returns (value, self). If retries were needed, returns
557
+ (value, new_response) where new_response is the final successful response.
558
+
559
+ Raises:
560
+ ValueError: If max_retries is negative.
561
+ ParseError: If parsing fails after exhausting all retry attempts.
562
+ Error: If the LLM call fails while generating a retry response.
563
+ """
564
+ if max_retries < 0:
565
+ raise ValueError("max_retries must be non-negative")
566
+
567
+ if self.format is None:
568
+ return None, self
569
+
570
+ current_response: AsyncContextResponse[DepsT, FormattableT] = self
571
+ for attempt in range(max_retries + 1):
572
+ try:
573
+ return current_response.parse(), current_response
574
+ except ParseError as e:
575
+ if attempt == max_retries:
576
+ raise
577
+ current_response = await current_response.resume(ctx, e.retry_message())
578
+
579
+ raise AssertionError("Unreachable") # pragma: no cover
@@ -6,6 +6,7 @@ from typing import TYPE_CHECKING, Generic, overload
6
6
 
7
7
  from ..content import ToolOutput
8
8
  from ..context import Context, DepsT
9
+ from ..exceptions import ParseError
9
10
  from ..formatting import Format, FormattableT
10
11
  from ..messages import Message, UserContent
11
12
  from ..tools import (
@@ -156,6 +157,61 @@ class StreamResponse(BaseSyncStreamResponse[Toolkit, FormattableT]):
156
157
  content=content,
157
158
  )
158
159
 
160
+ @overload
161
+ def validate(
162
+ self: "StreamResponse[None]", max_retries: int = 1
163
+ ) -> tuple[None, "StreamResponse[None]"]: ...
164
+
165
+ @overload
166
+ def validate(
167
+ self: "StreamResponse[FormattableT]", max_retries: int = 1
168
+ ) -> tuple[FormattableT, "StreamResponse[FormattableT]"]: ...
169
+
170
+ def validate(
171
+ self, max_retries: int = 1
172
+ ) -> tuple[
173
+ FormattableT | None, "StreamResponse[FormattableT] | StreamResponse[None]"
174
+ ]:
175
+ """Parse and validate the response, retrying on parse errors.
176
+
177
+ Consumes the stream (calls `finish()`) and then attempts to parse. On
178
+ `ParseError`, asks the LLM to fix its output by resuming with the error
179
+ message. Returns both the parsed value and the (potentially updated) response.
180
+
181
+ Args:
182
+ max_retries: Maximum number of retry attempts on parse failure.
183
+ Defaults to 1 (2 total attempts). Must be non-negative.
184
+
185
+ Returns:
186
+ A tuple of (parsed_value, response). If parsing succeeded on the first
187
+ attempt, returns (value, self). If retries were needed, returns
188
+ (value, new_response) where new_response is the final successful response.
189
+
190
+ Raises:
191
+ ValueError: If max_retries is negative.
192
+ ParseError: If parsing fails after exhausting all retry attempts.
193
+ Error: If the LLM call fails while generating a retry response.
194
+ """
195
+ if max_retries < 0:
196
+ raise ValueError("max_retries must be non-negative")
197
+
198
+ self.finish()
199
+
200
+ if self.format is None:
201
+ return None, self
202
+
203
+ current_response: StreamResponse[FormattableT] = self
204
+ for attempt in range(max_retries + 1):
205
+ try:
206
+ return current_response.parse(), current_response
207
+ except ParseError as e:
208
+ if attempt == max_retries:
209
+ raise
210
+ current_response = current_response.resume(e.retry_message())
211
+ current_response.finish()
212
+
213
+ raise AssertionError("Unreachable") # pragma: no cover
214
+
159
215
 
160
216
  class AsyncStreamResponse(BaseAsyncStreamResponse[AsyncToolkit, FormattableT]):
161
217
  """An `AsyncStreamResponse` wraps response content from the LLM with a streaming interface.
@@ -287,6 +343,62 @@ class AsyncStreamResponse(BaseAsyncStreamResponse[AsyncToolkit, FormattableT]):
287
343
  content=content,
288
344
  )
289
345
 
346
+ @overload
347
+ async def validate(
348
+ self: "AsyncStreamResponse[None]", max_retries: int = 1
349
+ ) -> tuple[None, "AsyncStreamResponse[None]"]: ...
350
+
351
+ @overload
352
+ async def validate(
353
+ self: "AsyncStreamResponse[FormattableT]", max_retries: int = 1
354
+ ) -> tuple[FormattableT, "AsyncStreamResponse[FormattableT]"]: ...
355
+
356
+ async def validate(
357
+ self, max_retries: int = 1
358
+ ) -> tuple[
359
+ FormattableT | None,
360
+ "AsyncStreamResponse[FormattableT] | AsyncStreamResponse[None]",
361
+ ]:
362
+ """Parse and validate the response, retrying on parse errors.
363
+
364
+ Consumes the stream (calls `finish()`) and then attempts to parse. On
365
+ `ParseError`, asks the LLM to fix its output by resuming with the error
366
+ message. Returns both the parsed value and the (potentially updated) response.
367
+
368
+ Args:
369
+ max_retries: Maximum number of retry attempts on parse failure.
370
+ Defaults to 1 (2 total attempts). Must be non-negative.
371
+
372
+ Returns:
373
+ A tuple of (parsed_value, response). If parsing succeeded on the first
374
+ attempt, returns (value, self). If retries were needed, returns
375
+ (value, new_response) where new_response is the final successful response.
376
+
377
+ Raises:
378
+ ValueError: If max_retries is negative.
379
+ ParseError: If parsing fails after exhausting all retry attempts.
380
+ Error: If the LLM call fails while generating a retry response.
381
+ """
382
+ if max_retries < 0:
383
+ raise ValueError("max_retries must be non-negative")
384
+
385
+ await self.finish()
386
+
387
+ if self.format is None:
388
+ return None, self
389
+
390
+ current_response: AsyncStreamResponse[FormattableT] = self
391
+ for attempt in range(max_retries + 1):
392
+ try:
393
+ return current_response.parse(), current_response
394
+ except ParseError as e:
395
+ if attempt == max_retries:
396
+ raise
397
+ current_response = await current_response.resume(e.retry_message())
398
+ await current_response.finish()
399
+
400
+ raise AssertionError("Unreachable") # pragma: no cover
401
+
290
402
 
291
403
  class ContextStreamResponse(
292
404
  BaseSyncStreamResponse[ContextToolkit[DepsT], FormattableT],
@@ -428,6 +540,67 @@ class ContextStreamResponse(
428
540
  content=content,
429
541
  )
430
542
 
543
+ @overload
544
+ def validate(
545
+ self: "ContextStreamResponse[DepsT, None]",
546
+ ctx: Context[DepsT],
547
+ max_retries: int = 1,
548
+ ) -> tuple[None, "ContextStreamResponse[DepsT, None]"]: ...
549
+
550
+ @overload
551
+ def validate(
552
+ self: "ContextStreamResponse[DepsT, FormattableT]",
553
+ ctx: Context[DepsT],
554
+ max_retries: int = 1,
555
+ ) -> tuple[FormattableT, "ContextStreamResponse[DepsT, FormattableT]"]: ...
556
+
557
+ def validate(
558
+ self, ctx: Context[DepsT], max_retries: int = 1
559
+ ) -> tuple[
560
+ FormattableT | None,
561
+ "ContextStreamResponse[DepsT, FormattableT] | ContextStreamResponse[DepsT, None]",
562
+ ]:
563
+ """Parse and validate the response, retrying on parse errors.
564
+
565
+ Consumes the stream (calls `finish()`) and then attempts to parse. On
566
+ `ParseError`, asks the LLM to fix its output by resuming with the error
567
+ message. Returns both the parsed value and the (potentially updated) response.
568
+
569
+ Args:
570
+ ctx: A `Context` with the required deps type.
571
+ max_retries: Maximum number of retry attempts on parse failure.
572
+ Defaults to 1 (2 total attempts). Must be non-negative.
573
+
574
+ Returns:
575
+ A tuple of (parsed_value, response). If parsing succeeded on the first
576
+ attempt, returns (value, self). If retries were needed, returns
577
+ (value, new_response) where new_response is the final successful response.
578
+
579
+ Raises:
580
+ ValueError: If max_retries is negative.
581
+ ParseError: If parsing fails after exhausting all retry attempts.
582
+ Error: If the LLM call fails while generating a retry response.
583
+ """
584
+ if max_retries < 0:
585
+ raise ValueError("max_retries must be non-negative")
586
+
587
+ self.finish()
588
+
589
+ if self.format is None:
590
+ return None, self
591
+
592
+ current_response: ContextStreamResponse[DepsT, FormattableT] = self
593
+ for attempt in range(max_retries + 1):
594
+ try:
595
+ return current_response.parse(), current_response
596
+ except ParseError as e:
597
+ if attempt == max_retries:
598
+ raise
599
+ current_response = current_response.resume(ctx, e.retry_message())
600
+ current_response.finish()
601
+
602
+ raise AssertionError("Unreachable") # pragma: no cover
603
+
431
604
 
432
605
  class AsyncContextStreamResponse(
433
606
  BaseAsyncStreamResponse[AsyncContextToolkit[DepsT], FormattableT],
@@ -575,3 +748,64 @@ class AsyncContextStreamResponse(
575
748
  response=self,
576
749
  content=content,
577
750
  )
751
+
752
+ @overload
753
+ async def validate(
754
+ self: "AsyncContextStreamResponse[DepsT, None]",
755
+ ctx: Context[DepsT],
756
+ max_retries: int = 1,
757
+ ) -> tuple[None, "AsyncContextStreamResponse[DepsT, None]"]: ...
758
+
759
+ @overload
760
+ async def validate(
761
+ self: "AsyncContextStreamResponse[DepsT, FormattableT]",
762
+ ctx: Context[DepsT],
763
+ max_retries: int = 1,
764
+ ) -> tuple[FormattableT, "AsyncContextStreamResponse[DepsT, FormattableT]"]: ...
765
+
766
+ async def validate(
767
+ self, ctx: Context[DepsT], max_retries: int = 1
768
+ ) -> tuple[
769
+ FormattableT | None,
770
+ "AsyncContextStreamResponse[DepsT, FormattableT] | AsyncContextStreamResponse[DepsT, None]",
771
+ ]:
772
+ """Parse and validate the response, retrying on parse errors.
773
+
774
+ Consumes the stream (calls `finish()`) and then attempts to parse. On
775
+ `ParseError`, asks the LLM to fix its output by resuming with the error
776
+ message. Returns both the parsed value and the (potentially updated) response.
777
+
778
+ Args:
779
+ ctx: A `Context` with the required deps type.
780
+ max_retries: Maximum number of retry attempts on parse failure.
781
+ Defaults to 1 (2 total attempts). Must be non-negative.
782
+
783
+ Returns:
784
+ A tuple of (parsed_value, response). If parsing succeeded on the first
785
+ attempt, returns (value, self). If retries were needed, returns
786
+ (value, new_response) where new_response is the final successful response.
787
+
788
+ Raises:
789
+ ValueError: If max_retries is negative.
790
+ ParseError: If parsing fails after exhausting all retry attempts.
791
+ Error: If the LLM call fails while generating a retry response.
792
+ """
793
+ if max_retries < 0:
794
+ raise ValueError("max_retries must be non-negative")
795
+
796
+ await self.finish()
797
+
798
+ if self.format is None:
799
+ return None, self
800
+
801
+ current_response: AsyncContextStreamResponse[DepsT, FormattableT] = self
802
+ for attempt in range(max_retries + 1):
803
+ try:
804
+ return current_response.parse(), current_response
805
+ except ParseError as e:
806
+ if attempt == max_retries:
807
+ raise
808
+ current_response = await current_response.resume(ctx, e.retry_message())
809
+ await current_response.finish()
810
+
811
+ raise AssertionError("Unreachable") # pragma: no cover
@@ -0,0 +1,51 @@
1
+ """Retry functionality for reliable LLM interactions.
2
+
3
+ This module provides retry capabilities for LLM calls, including:
4
+ - Automatic retry on failures (connection errors, rate limits, etc.)
5
+ - Configurable retry strategies and backoff
6
+ - Fallback models
7
+ - Retry metadata tracking
8
+ """
9
+
10
+ from .retry_calls import AsyncRetryCall, BaseRetryCall, RetryCall
11
+ from .retry_config import RetryArgs, RetryConfig
12
+ from .retry_decorator import retry
13
+ from .retry_models import RetryModel, RetryModelParams, retry_model
14
+ from .retry_prompts import AsyncRetryPrompt, BaseRetryPrompt, RetryPrompt
15
+ from .retry_responses import (
16
+ AsyncContextRetryResponse,
17
+ AsyncRetryResponse,
18
+ ContextRetryResponse,
19
+ RetryResponse,
20
+ )
21
+ from .retry_stream_responses import (
22
+ AsyncContextRetryStreamResponse,
23
+ AsyncRetryStreamResponse,
24
+ ContextRetryStreamResponse,
25
+ RetryStreamResponse,
26
+ )
27
+ from .utils import RetryFailure
28
+
29
+ __all__ = [
30
+ "AsyncContextRetryResponse",
31
+ "AsyncContextRetryStreamResponse",
32
+ "AsyncRetryCall",
33
+ "AsyncRetryPrompt",
34
+ "AsyncRetryResponse",
35
+ "AsyncRetryStreamResponse",
36
+ "BaseRetryCall",
37
+ "BaseRetryPrompt",
38
+ "ContextRetryResponse",
39
+ "ContextRetryStreamResponse",
40
+ "RetryArgs",
41
+ "RetryCall",
42
+ "RetryConfig",
43
+ "RetryFailure",
44
+ "RetryModel",
45
+ "RetryModelParams",
46
+ "RetryPrompt",
47
+ "RetryResponse",
48
+ "RetryStreamResponse",
49
+ "retry",
50
+ "retry_model",
51
+ ]