ai-retry 1.0.0-beta.1 → 1.0.0-beta.2

This diff reflects the published contents of the two package versions as they appear in the public registry.
package/README.md CHANGED
@@ -176,6 +176,58 @@ const retryableModel = createRetryable({
 
  In this example, if the base model fails with a 429 status code or a service-overloaded error, it retries with `gpt-4-mini` on Azure. In any other error case, it falls back to `claude-3-haiku-20240307` on Anthropic. If the order were reversed, the static retryable would catch all errors first and the dynamic retryable would never be reached.
 
+ #### Errors vs Results
+
+ Dynamic retryables can be further divided by what triggers them:
+
+ - **Error-based retryables** handle requests that throw an error (e.g., timeouts, rate limits, service unavailable).
+ - **Result-based retryables** handle successful responses that still need retrying (e.g., content filtering, guardrails).
+
+ Both types of retryables share the same interface and receive the current attempt as context. Use the `isErrorAttempt` and `isResultAttempt` type guards to check the type of the current attempt.
+
+ ```typescript
+ import { generateText } from 'ai';
+ import { createRetryable, isErrorAttempt, isResultAttempt } from 'ai-retry';
+ import type { Retryable } from 'ai-retry';
+
+ // Error-based retryable: handles thrown errors (e.g., timeouts, rate limits)
+ const errorBasedRetry: Retryable = (context) => {
+   if (isErrorAttempt(context.current)) {
+     const { error } = context.current;
+     // The request threw an error - e.g., network timeout, 429 rate limit
+     console.log('Request failed with error:', error);
+     return { model: anthropic('claude-3-haiku-20240307') };
+   }
+   return undefined;
+ };
+
+ // Result-based retryable: handles successful responses that need retrying
+ const resultBasedRetry: Retryable = (context) => {
+   if (isResultAttempt(context.current)) {
+     const { result } = context.current;
+     // The request succeeded, but the response indicates a problem
+     if (result.finishReason === 'content-filter') {
+       console.log('Content was filtered, trying different model');
+       return { model: openai('gpt-4') };
+     }
+   }
+   return undefined;
+ };
+
+ const retryableModel = createRetryable({
+   model: azure('gpt-4-mini'),
+   retries: [
+     // Error-based: catches thrown errors like timeouts, rate limits, etc.
+     errorBasedRetry,
+
+     // Result-based: catches successful responses that need retrying
+     resultBasedRetry,
+   ],
+ });
+ ```
+
+ Result-based retryables are only available for generate calls like `generateText` and `generateObject`. They are not available for streaming calls like `streamText` and `streamObject`.
+
  #### Fallbacks
 
  If you don't need precise error matching with custom logic and just want to fall back to different models on any error, you can simply provide a list of models.
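For illustration, a minimal sketch of that list-of-models form, assuming the providers are set up as in the earlier examples (model IDs are placeholders):

```typescript
import { createRetryable } from 'ai-retry';
import { openai } from '@ai-sdk/openai';
import { anthropic } from '@ai-sdk/anthropic';

// Any error on the primary model falls back to the listed models, in order.
const retryableModel = createRetryable({
  model: openai('gpt-4'),
  retries: [
    anthropic('claude-3-haiku-20240307'),
    openai('gpt-4-mini'),
  ],
});
```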
@@ -362,11 +414,23 @@ Handle service overload errors (status code 529) by switching to a provider.
  import { serviceOverloaded } from 'ai-retry/retryables';
 
  const retryableModel = createRetryable({
-   model: azure('gpt-4'),
+   model: anthropic('claude-sonnet-4-0'),
    retries: [
-     serviceOverloaded(openai('gpt-4')), // Switch to OpenAI if Azure is overloaded
+     // Retry with delay and exponential backoff
+     serviceOverloaded(anthropic('claude-sonnet-4-0'), {
+       delay: 5_000,
+       backoffFactor: 2,
+       maxAttempts: 5,
+     }),
+     // Or switch to a different provider
+     serviceOverloaded(openai('gpt-4')),
    ],
  });
+
+ const result = streamText({
+   model: retryableModel,
+   prompt: 'Write a story about a robot...',
+ });
  ```
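As a rough sketch of the wait times these settings produce, assuming the `delay × backoffFactor^attempt` formula from the backoff comments in `dist/index.mjs` (attempt counting is per retry model):

```typescript
const delay = 5_000;
const backoffFactor = 2;

// Waits before the 1st, 2nd, 3rd, and 4th retry of this model
const waits = Array.from({ length: 4 }, (_, attempt) => delay * backoffFactor ** attempt);
console.log(waits); // [5000, 10000, 20000, 40000]
```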
 
  #### Service Unavailable
@@ -575,6 +639,58 @@ const result = await generateText({
 
  The retry's `providerOptions` will completely replace the original ones during retry attempts. This works for all model types (language and embedding) and all operations (generate, stream, embed).
 
+ #### Call Options
+
+ You can override various call options when retrying requests. This is useful for adjusting parameters like temperature, max tokens, or even the prompt itself for retry attempts. Call options are specified in the `options` field of the retry object.
+
+ ```typescript
+ const retryableModel = createRetryable({
+   model: openai('gpt-4'),
+   retries: [
+     {
+       model: anthropic('claude-3-haiku'),
+       options: {
+         // Override generation parameters for more deterministic output
+         temperature: 0.3,
+         topP: 0.9,
+         maxOutputTokens: 500,
+         // Set a seed for reproducibility
+         seed: 42,
+       },
+     },
+   ],
+ });
+ ```
+
+ The following options can be overridden:
+
+ > [!NOTE]
+ > Override options completely replace the original values (they are not merged). If you don't specify an option, the original value from the request is used.
+
+ ##### Language Model Options
+
+ | Option | Description |
+ |--------|-------------|
+ | [`prompt`](https://ai-sdk.dev/docs/reference/ai-sdk-core/generate-text#prompt) | Override the entire prompt for the retry |
+ | [`temperature`](https://ai-sdk.dev/docs/reference/ai-sdk-core/generate-text#temperature) | Temperature setting for controlling randomness |
+ | [`topP`](https://ai-sdk.dev/docs/reference/ai-sdk-core/generate-text#topp) | Nucleus sampling parameter |
+ | [`topK`](https://ai-sdk.dev/docs/reference/ai-sdk-core/generate-text#topk) | Top-K sampling parameter |
+ | [`maxOutputTokens`](https://ai-sdk.dev/docs/reference/ai-sdk-core/generate-text#max-output-tokens) | Maximum number of tokens to generate |
+ | [`seed`](https://ai-sdk.dev/docs/reference/ai-sdk-core/generate-text#seed) | Random seed for deterministic generation |
+ | [`stopSequences`](https://ai-sdk.dev/docs/reference/ai-sdk-types/generate-text#stopsequences) | Stop sequences to end generation |
+ | [`presencePenalty`](https://ai-sdk.dev/docs/reference/ai-sdk-core/generate-text#presencepenalty) | Presence penalty for reducing repetition |
+ | [`frequencyPenalty`](https://ai-sdk.dev/docs/reference/ai-sdk-core/generate-text#frequencypenalty) | Frequency penalty for reducing repetition |
+ | [`headers`](https://ai-sdk.dev/docs/reference/ai-sdk-core/generate-text#headers) | Additional HTTP headers |
+ | [`providerOptions`](https://ai-sdk.dev/docs/reference/ai-sdk-types/generate-text#provideroptions) | Provider-specific options |
+
+ ##### Embedding Model Options
+
+ | Option | Description |
+ |--------|-------------|
+ | [`values`](https://ai-sdk.dev/docs/reference/ai-sdk-core/embed#values) | Override the values to embed |
+ | [`headers`](https://ai-sdk.dev/docs/reference/ai-sdk-core/embed#headers) | Additional HTTP headers |
+ | [`providerOptions`](https://ai-sdk.dev/docs/reference/ai-sdk-core/embed#provideroptions) | Provider-specific options |
+
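To make the note above concrete, a small sketch of the replace-not-merge behavior; the option values are illustrative, and the resolution mirrors the `retryOption ?? originalOption` fallbacks visible in `dist/index.mjs`:

```typescript
const originalCall = {
  temperature: 0.9,
  topP: 0.95,
  providerOptions: { openai: { user: 'original-request' } },
};

const retryOverrides = {
  temperature: 0.3,
  providerOptions: { anthropic: { someSetting: true } }, // hypothetical provider option
};

const effective = {
  ...originalCall,
  temperature: retryOverrides.temperature ?? originalCall.temperature, // 0.3 (replaced)
  // topP is not overridden, so the original 0.95 is kept
  providerOptions: retryOverrides.providerOptions ?? originalCall.providerOptions,
  // -> only the anthropic entry; the original openai entry is not merged in
};
```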
  #### Logging
 
  You can use the following callbacks to log retry attempts and errors:
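The callback list itself falls outside this hunk, but based on the `onError` and `onRetry` members of `RetryableModelOptions` in the bundled type declarations below, a minimal wiring sketch (log messages are illustrative):

```typescript
import { createRetryable } from 'ai-retry';
import { serviceOverloaded } from 'ai-retry/retryables';
import { openai } from '@ai-sdk/openai';
import { anthropic } from '@ai-sdk/anthropic';

const retryableModel = createRetryable({
  model: openai('gpt-4'),
  retries: [serviceOverloaded(anthropic('claude-3-haiku-20240307'))],
  // Both callbacks receive a RetryContext with the current attempt and all attempts so far
  onError: ({ current, attempts }) => {
    console.error(`attempt ${attempts.length} failed`, current.error);
  },
  onRetry: ({ current, attempts }) => {
    console.warn(`retrying after ${attempts.length} attempt(s) with`, current.model);
  },
});
```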
@@ -641,7 +757,7 @@ type Retryable = (
 
  #### `Retry`
 
- A `Retry` specifies the model to retry and optional settings like `maxAttempts`, `delay`, `backoffFactor`, `timeout`, and `providerOptions`.
+ A `Retry` specifies the model to retry and optional settings. The available options depend on the model type (language model or embedding model).
 
  ```typescript
  interface Retry {
@@ -650,18 +766,11 @@ interface Retry {
    delay?: number; // Delay in milliseconds before retrying
    backoffFactor?: number; // Multiplier for exponential backoff
    timeout?: number; // Timeout in milliseconds for the retry attempt
-   providerOptions?: ProviderOptions; // Provider-specific options for the retry
+   providerOptions?: ProviderOptions; // @deprecated - use options.providerOptions instead
+   options?: LanguageModelV2CallOptions | EmbeddingModelV2CallOptions; // Call options to override for this retry
  }
  ```
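Since `providerOptions` on a retry is deprecated in favor of `options.providerOptions` (and, per the JSDoc in the bundled types, `options.providerOptions` wins when both are set), a migration sketch with illustrative values:

```typescript
import { anthropic } from '@ai-sdk/anthropic';

// Before (still accepted, but deprecated)
const legacyRetry = {
  model: anthropic('claude-3-haiku-20240307'),
  providerOptions: { anthropic: { someSetting: true } }, // hypothetical provider option
};

// After: nest the provider options under `options`
const migratedRetry = {
  model: anthropic('claude-3-haiku-20240307'),
  options: {
    providerOptions: { anthropic: { someSetting: true } },
  },
};
```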
 
- **Options:**
- - `model`: The model to use for the retry attempt.
- - `maxAttempts`: Maximum number of times this model can be retried. Default is 1.
- - `delay`: Delay in milliseconds to wait before retrying. The delay respects abort signals from the request.
- - `backoffFactor`: Multiplier for exponential backoff (`delay × backoffFactor^attempt`). If not provided, uses fixed delay.
- - `timeout`: Timeout in milliseconds for creating a fresh `AbortSignal.timeout()` for the retry attempt. This replaces any existing abort signal.
- - `providerOptions`: Provider-specific options that override the original request's provider options during retry attempts.
-
  #### `RetryContext`
 
  The `RetryContext` object contains information about the current attempt and all previous attempts.
@@ -675,13 +784,23 @@ interface RetryContext {
 
  #### `RetryAttempt`
 
- A `RetryAttempt` represents a single attempt with a specific model, which can be either an error or a successful result that triggered a retry.
+ A `RetryAttempt` represents a single attempt with a specific model, which can be either an error or a successful result that triggered a retry. Each attempt includes the call options that were used for that specific attempt. For retry attempts, this will reflect any overridden options from the retry configuration.
 
  ```typescript
  // For both language and embedding models
  type RetryAttempt =
-   | { type: 'error'; error: unknown; model: LanguageModelV2 | EmbeddingModelV2 }
-   | { type: 'result'; result: LanguageModelV2Generate; model: LanguageModelV2 };
+   | {
+       type: 'error';
+       error: unknown;
+       model: LanguageModelV2 | EmbeddingModelV2;
+       options: LanguageModelV2CallOptions | EmbeddingModelV2CallOptions;
+     }
+   | {
+       type: 'result';
+       result: LanguageModelV2Generate;
+       model: LanguageModelV2;
+       options: LanguageModelV2CallOptions;
+     };
 
  // Note: Result-based retries only apply to language models, not embedding models
 
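Because each attempt now carries the `options` it was called with, a retryable can inspect them when deciding what to do next; a minimal sketch (the override values are illustrative):

```typescript
import { createRetryable, isErrorAttempt } from 'ai-retry';
import type { Retryable } from 'ai-retry';
import { openai } from '@ai-sdk/openai';
import { anthropic } from '@ai-sdk/anthropic';

const retryWithSmallerBudget: Retryable = (context) => {
  if (isErrorAttempt(context.current)) {
    // The exact call options (including any earlier overrides) used by the failed attempt
    console.log('failed attempt call options:', context.current.options);
    return {
      model: anthropic('claude-3-haiku-20240307'),
      options: { maxOutputTokens: 256, temperature: 0 },
    };
  }
  return undefined;
};

const retryableModel = createRetryable({
  model: openai('gpt-4'),
  retries: [retryWithSmallerBudget],
});
```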
package/dist/index.d.mts CHANGED
@@ -1,4 +1,4 @@
- import { S as RetryableOptions, _ as RetryContext, a as LanguageModel, b as Retryable, c as LanguageModelStream, d as ResolvableLanguageModel, f as ResolvableModel, g as RetryAttempt, h as Retry, i as GatewayLanguageModelId, l as LanguageModelStreamPart, m as Retries, n as EmbeddingModelCallOptions, o as LanguageModelCallOptions, p as ResolvedModel, r as EmbeddingModelEmbed, s as LanguageModelGenerate, t as EmbeddingModel, u as ProviderOptions, v as RetryErrorAttempt, x as RetryableModelOptions, y as RetryResultAttempt } from "./types-D7G-2JLh.mjs";
+ import { C as RetryableModelOptions, S as Retryable, _ as Retry, a as GatewayLanguageModelId, b as RetryErrorAttempt, c as LanguageModelGenerate, d as LanguageModelStreamPart, f as ProviderOptions, g as Retries, h as ResolvedModel, i as EmbeddingModelRetryCallOptions, l as LanguageModelRetryCallOptions, m as ResolvableModel, n as EmbeddingModelCallOptions, o as LanguageModel, p as ResolvableLanguageModel, r as EmbeddingModelEmbed, s as LanguageModelCallOptions, t as EmbeddingModel, u as LanguageModelStream, v as RetryAttempt, w as RetryableOptions, x as RetryResultAttempt, y as RetryContext } from "./types-JvcFQz93.mjs";
  import * as _ai_sdk_provider0 from "@ai-sdk/provider";
 
  //#region src/create-retryable-model.d.ts
@@ -63,4 +63,4 @@ declare const isStreamContentPart: (part: LanguageModelStreamPart) => part is _a
  rawValue: unknown;
  };
  //#endregion
- export { EmbeddingModel, EmbeddingModelCallOptions, EmbeddingModelEmbed, GatewayLanguageModelId, LanguageModel, LanguageModelCallOptions, LanguageModelGenerate, LanguageModelStream, LanguageModelStreamPart, ProviderOptions, ResolvableLanguageModel, ResolvableModel, ResolvedModel, Retries, Retry, RetryAttempt, RetryContext, RetryErrorAttempt, RetryResultAttempt, Retryable, RetryableModelOptions, RetryableOptions, createRetryable, getModelKey, isEmbeddingModel, isErrorAttempt, isGenerateResult, isLanguageModel, isModel, isObject, isResultAttempt, isStreamContentPart, isStreamResult, isString };
+ export { EmbeddingModel, EmbeddingModelCallOptions, EmbeddingModelEmbed, EmbeddingModelRetryCallOptions, GatewayLanguageModelId, LanguageModel, LanguageModelCallOptions, LanguageModelGenerate, LanguageModelRetryCallOptions, LanguageModelStream, LanguageModelStreamPart, ProviderOptions, ResolvableLanguageModel, ResolvableModel, ResolvedModel, Retries, Retry, RetryAttempt, RetryContext, RetryErrorAttempt, RetryResultAttempt, Retryable, RetryableModelOptions, RetryableOptions, createRetryable, getModelKey, isEmbeddingModel, isErrorAttempt, isGenerateResult, isLanguageModel, isModel, isObject, isResultAttempt, isStreamContentPart, isStreamResult, isString };
package/dist/index.mjs CHANGED
@@ -130,6 +130,19 @@ var RetryableEmbeddingModel = class {
  return typeof this.options.disabled === "function" ? this.options.disabled() : this.options.disabled;
  }
  /**
+ * Get the retry call options overrides from a retry configuration.
+ */
+ getRetryCallOptions(callOptions, currentRetry) {
+ const retryOptions = currentRetry?.options ?? {};
+ return {
+ ...callOptions,
+ values: retryOptions.values ?? callOptions.values,
+ headers: retryOptions.headers ?? callOptions.headers,
+ providerOptions: retryOptions.providerOptions ?? currentRetry?.providerOptions ?? callOptions.providerOptions,
+ abortSignal: currentRetry?.timeout ? AbortSignal.timeout(currentRetry.timeout) : callOptions.abortSignal
+ };
+ }
+ /**
  * Execute a function with retry logic for handling errors
  */
  async withRetry(input) {
@@ -160,13 +173,17 @@ var RetryableEmbeddingModel = class {
  };
  this.options.onRetry?.(context);
  }
+ /**
+ * Get the retry call options overrides for this attempt
+ */
+ const retryCallOptions = this.getRetryCallOptions(input.callOptions, currentRetry);
  try {
  return {
- result: await input.fn(currentRetry),
+ result: await input.fn(retryCallOptions),
  attempts
  };
  } catch (error) {
- const { retryModel, attempt } = await this.handleError(error, attempts);
+ const { retryModel, attempt } = await this.handleError(error, attempts, retryCallOptions);
  attempts.push(attempt);
  if (retryModel.delay) {
  /**
@@ -178,7 +195,7 @@ var RetryableEmbeddingModel = class {
  * - Attempt 3: 4000ms
  */
  const modelAttemptsCount = countModelAttempts(retryModel.model, attempts);
- await delay(calculateExponentialBackoff(retryModel.delay, retryModel.backoffFactor, modelAttemptsCount), { abortSignal: input.abortSignal });
+ await delay(calculateExponentialBackoff(retryModel.delay, retryModel.backoffFactor, modelAttemptsCount), { abortSignal: retryCallOptions.abortSignal });
  }
  this.currentModel = retryModel.model;
  currentRetry = retryModel;
@@ -188,11 +205,12 @@ var RetryableEmbeddingModel = class {
  /**
  * Handle an error and determine if a retry is needed
  */
- async handleError(error, attempts) {
+ async handleError(error, attempts, callOptions) {
  const errorAttempt = {
  type: "error",
  error,
- model: this.currentModel
+ model: this.currentModel,
+ options: callOptions
  };
  /**
  * Save the current attempt
@@ -217,7 +235,7 @@ var RetryableEmbeddingModel = class {
  attempt: errorAttempt
  };
  }
- async doEmbed(options) {
+ async doEmbed(callOptions) {
  /**
  * Always start with the original model
  */
@@ -225,17 +243,12 @@ var RetryableEmbeddingModel = class {
  /**
  * If retries are disabled, bypass retry machinery entirely
  */
- if (this.isDisabled()) return this.currentModel.doEmbed(options);
+ if (this.isDisabled()) return this.currentModel.doEmbed(callOptions);
  const { result } = await this.withRetry({
- fn: async (currentRetry) => {
- const callOptions = {
- ...options,
- providerOptions: currentRetry?.providerOptions ?? options.providerOptions,
- abortSignal: currentRetry?.timeout ? AbortSignal.timeout(currentRetry.timeout) : options.abortSignal
- };
- return this.currentModel.doEmbed(callOptions);
+ fn: async (retryCallOptions) => {
+ return this.currentModel.doEmbed(retryCallOptions);
  },
- abortSignal: options.abortSignal
+ callOptions
  });
  return result;
  }
@@ -270,6 +283,27 @@ var RetryableLanguageModel = class {
  return typeof this.options.disabled === "function" ? this.options.disabled() : this.options.disabled;
  }
  /**
+ * Get the retry call options overrides from a retry configuration.
+ */
+ getRetryCallOptions(callOptions, currentRetry) {
+ const retryOptions = currentRetry?.options ?? {};
+ return {
+ ...callOptions,
+ prompt: retryOptions.prompt ?? callOptions.prompt,
+ maxOutputTokens: retryOptions.maxOutputTokens ?? callOptions.maxOutputTokens,
+ temperature: retryOptions.temperature ?? callOptions.temperature,
+ stopSequences: retryOptions.stopSequences ?? callOptions.stopSequences,
+ topP: retryOptions.topP ?? callOptions.topP,
+ topK: retryOptions.topK ?? callOptions.topK,
+ presencePenalty: retryOptions.presencePenalty ?? callOptions.presencePenalty,
+ frequencyPenalty: retryOptions.frequencyPenalty ?? callOptions.frequencyPenalty,
+ seed: retryOptions.seed ?? callOptions.seed,
+ headers: retryOptions.headers ?? callOptions.headers,
+ providerOptions: retryOptions.providerOptions ?? currentRetry?.providerOptions ?? callOptions.providerOptions,
+ abortSignal: currentRetry?.timeout ? AbortSignal.timeout(currentRetry.timeout) : callOptions.abortSignal
+ };
+ }
+ /**
  * Execute a function with retry logic for handling errors
  */
  async withRetry(input) {
@@ -280,7 +314,7 @@ var RetryableLanguageModel = class {
  /**
  * Track current retry configuration.
  */
- let currentRetry;
+ let currentRetry = input.currentRetry;
  while (true) {
  /**
  * The previous attempt that triggered a retry, or undefined if this is the first attempt
@@ -300,16 +334,20 @@ var RetryableLanguageModel = class {
  };
  this.options.onRetry?.(context);
  }
+ /**
+ * Get the retry call options overrides for this attempt
+ */
+ const retryCallOptions = this.getRetryCallOptions(input.callOptions, currentRetry);
  try {
  /**
  * Call the function that may need to be retried
  */
- const result = await input.fn(currentRetry);
+ const result = await input.fn(retryCallOptions);
  /**
  * Check if the result should trigger a retry (only for generate results, not streams)
  */
  if (isGenerateResult(result)) {
- const { retryModel, attempt } = await this.handleResult(result, attempts);
+ const { retryModel, attempt } = await this.handleResult(result, attempts, retryCallOptions);
  attempts.push(attempt);
  if (retryModel) {
  if (retryModel.delay) {
@@ -322,7 +360,7 @@ var RetryableLanguageModel = class {
  * - Attempt 3: 4000ms
  */
  const modelAttemptsCount = countModelAttempts(retryModel.model, attempts);
- await delay(calculateExponentialBackoff(retryModel.delay, retryModel.backoffFactor, modelAttemptsCount), { abortSignal: input.abortSignal });
+ await delay(calculateExponentialBackoff(retryModel.delay, retryModel.backoffFactor, modelAttemptsCount), { abortSignal: retryCallOptions.abortSignal });
  }
  this.currentModel = retryModel.model;
  currentRetry = retryModel;
@@ -337,7 +375,7 @@ var RetryableLanguageModel = class {
  attempts
  };
  } catch (error) {
- const { retryModel, attempt } = await this.handleError(error, attempts);
+ const { retryModel, attempt } = await this.handleError(error, attempts, retryCallOptions);
  attempts.push(attempt);
  if (retryModel.delay) {
  /**
@@ -345,7 +383,7 @@ var RetryableLanguageModel = class {
  * The delay grows exponentially: baseDelay * backoffFactor^attempts
  */
  const modelAttemptsCount = countModelAttempts(retryModel.model, attempts);
- await delay(calculateExponentialBackoff(retryModel.delay, retryModel.backoffFactor, modelAttemptsCount), { abortSignal: input.abortSignal });
+ await delay(calculateExponentialBackoff(retryModel.delay, retryModel.backoffFactor, modelAttemptsCount), { abortSignal: retryCallOptions.abortSignal });
  }
  this.currentModel = retryModel.model;
  currentRetry = retryModel;
@@ -355,11 +393,12 @@ var RetryableLanguageModel = class {
  /**
  * Handle a successful result and determine if a retry is needed
  */
- async handleResult(result, attempts) {
+ async handleResult(result, attempts, callOptions) {
  const resultAttempt = {
  type: "result",
  result,
- model: this.currentModel
+ model: this.currentModel,
+ options: callOptions
  };
  const context = {
  current: resultAttempt,
@@ -373,11 +412,12 @@ var RetryableLanguageModel = class {
  /**
  * Handle an error and determine if a retry is needed
  */
- async handleError(error, attempts) {
+ async handleError(error, attempts, callOptions) {
  const errorAttempt = {
  type: "error",
  error,
- model: this.currentModel
+ model: this.currentModel,
+ options: callOptions
  };
  /**
  * Save the current attempt
@@ -402,7 +442,7 @@ var RetryableLanguageModel = class {
  attempt: errorAttempt
  };
  }
- async doGenerate(options) {
+ async doGenerate(callOptions) {
  /**
  * Always start with the original model
  */
@@ -410,21 +450,16 @@ var RetryableLanguageModel = class {
  /**
  * If retries are disabled, bypass retry machinery entirely
  */
- if (this.isDisabled()) return this.currentModel.doGenerate(options);
+ if (this.isDisabled()) return this.currentModel.doGenerate(callOptions);
  const { result } = await this.withRetry({
- fn: async (currentRetry) => {
- const callOptions = {
- ...options,
- providerOptions: currentRetry?.providerOptions ?? options.providerOptions,
- abortSignal: currentRetry?.timeout ? AbortSignal.timeout(currentRetry.timeout) : options.abortSignal
- };
- return this.currentModel.doGenerate(callOptions);
+ fn: async (retryCallOptions) => {
+ return this.currentModel.doGenerate(retryCallOptions);
  },
- abortSignal: options.abortSignal
+ callOptions
  });
  return result;
  }
- async doStream(options) {
+ async doStream(callOptions) {
  /**
  * Always start with the original model
  */
@@ -432,22 +467,21 @@ var RetryableLanguageModel = class {
  /**
  * If retries are disabled, bypass retry machinery entirely
  */
- if (this.isDisabled()) return this.currentModel.doStream(options);
+ if (this.isDisabled()) return this.currentModel.doStream(callOptions);
  /**
  * Perform the initial call to doStream with retry logic to handle errors before any data is streamed.
  */
  let { result, attempts } = await this.withRetry({
- fn: async (currentRetry) => {
- const callOptions = {
- ...options,
- providerOptions: currentRetry?.providerOptions ?? options.providerOptions,
- abortSignal: currentRetry?.timeout ? AbortSignal.timeout(currentRetry.timeout) : options.abortSignal
- };
- return this.currentModel.doStream(callOptions);
+ fn: async (retryCallOptions) => {
+ return this.currentModel.doStream(retryCallOptions);
  },
- abortSignal: options.abortSignal
+ callOptions
  });
  /**
+ * Track the current retry model for computing call options in the stream handler
+ */
+ let currentRetry;
+ /**
  * Wrap the original stream to handle retries if an error occurs during streaming.
  */
  const retryableStream = new ReadableStream({ start: async (controller) => {
@@ -477,11 +511,15 @@ var RetryableLanguageModel = class {
  controller.close();
  break;
  } catch (error) {
+ /**
+ * Get the retry call options for the failed attempt
+ */
+ const retryCallOptions = this.getRetryCallOptions(callOptions, currentRetry);
  /**
  * Check if the error from the stream can be retried.
  * Otherwise it will rethrow the error.
  */
- const { retryModel, attempt } = await this.handleError(error, attempts);
+ const { retryModel, attempt } = await this.handleError(error, attempts, retryCallOptions);
  /**
  * Save the attempt
  */
@@ -492,24 +530,21 @@ var RetryableLanguageModel = class {
  * The delay grows exponentially: baseDelay * backoffFactor^attempts
  */
  const modelAttemptsCount = countModelAttempts(retryModel.model, attempts);
- await delay(calculateExponentialBackoff(retryModel.delay, retryModel.backoffFactor, modelAttemptsCount), { abortSignal: options.abortSignal });
+ await delay(calculateExponentialBackoff(retryModel.delay, retryModel.backoffFactor, modelAttemptsCount), { abortSignal: retryCallOptions.abortSignal });
  }
  this.currentModel = retryModel.model;
+ currentRetry = retryModel;
  /**
  * Retry the request by calling doStream again.
  * This will create a new stream.
  */
  const retriedResult = await this.withRetry({
- fn: async () => {
- const callOptions = {
- ...options,
- providerOptions: retryModel.providerOptions ?? options.providerOptions,
- abortSignal: retryModel.timeout ? AbortSignal.timeout(retryModel.timeout) : options.abortSignal
- };
- return this.currentModel.doStream(callOptions);
+ fn: async (retryCallOptions$1) => {
+ return this.currentModel.doStream(retryCallOptions$1);
  },
+ callOptions,
  attempts,
- abortSignal: options.abortSignal
+ currentRetry
  });
  /**
  * Cancel the previous reader and stream if we are retrying
@@ -1,4 +1,4 @@
- import { S as RetryableOptions, b as Retryable, d as ResolvableLanguageModel, t as EmbeddingModel } from "../types-D7G-2JLh.mjs";
+ import { S as Retryable, p as ResolvableLanguageModel, t as EmbeddingModel, w as RetryableOptions } from "../types-JvcFQz93.mjs";
 
 
  //#region src/retryables/content-filter-triggered.d.ts
@@ -4,7 +4,7 @@ import { EmbeddingModelV3, LanguageModelV3, LanguageModelV3CallOptions, Language
  //#region src/types.d.ts
  type Literals<T> = T extends string ? string extends T ? never : T : never;
  type LanguageModel = LanguageModelV3;
- type EmbeddingModel<VALUE = any> = EmbeddingModelV3<VALUE>;
+ type EmbeddingModel = EmbeddingModelV3;
  type LanguageModelCallOptions = LanguageModelV3CallOptions;
  type LanguageModelStreamPart = LanguageModelV3StreamPart;
  type ProviderOptions = SharedV3ProviderOptions;
@@ -13,28 +13,13 @@ type ResolvableLanguageModel = LanguageModel | Literals<GatewayLanguageModelId>;
  type ResolvableModel<MODEL extends LanguageModel | EmbeddingModel> = MODEL extends LanguageModel ? ResolvableLanguageModel : EmbeddingModel;
  type ResolvedModel<MODEL extends ResolvableLanguageModel | EmbeddingModel> = MODEL extends ResolvableLanguageModel ? LanguageModel : EmbeddingModel;
  /**
- * Options for creating a retryable model.
+ * Call options that can be overridden during retry for language models.
  */
- interface RetryableModelOptions<MODEL extends LanguageModel | EmbeddingModel> {
- model: MODEL;
- retries: Retries<MODEL>;
- disabled?: boolean | (() => boolean);
- onError?: (context: RetryContext<MODEL>) => void;
- onRetry?: (context: RetryContext<MODEL>) => void;
- }
+ type LanguageModelRetryCallOptions = Partial<Pick<LanguageModelCallOptions, 'prompt' | 'maxOutputTokens' | 'temperature' | 'stopSequences' | 'topP' | 'topK' | 'presencePenalty' | 'frequencyPenalty' | 'seed' | 'headers' | 'providerOptions'>>;
  /**
- * The context provided to Retryables with the current attempt and all previous attempts.
+ * Call options that can be overridden during retry for embedding models.
  */
- type RetryContext<MODEL extends ResolvableLanguageModel | EmbeddingModel> = {
- /**
- * Current attempt that caused the retry
- */
- current: RetryAttempt<ResolvedModel<MODEL>>;
- /**
- * All attempts made so far, including the current one
- */
- attempts: Array<RetryAttempt<ResolvedModel<MODEL>>>;
- };
+ type EmbeddingModelRetryCallOptions = Partial<Pick<EmbeddingModelCallOptions, 'values' | 'headers' | 'providerOptions'>>;
  /**
  * A retry attempt with an error
  */
@@ -43,6 +28,10 @@ type RetryErrorAttempt<MODEL extends LanguageModel | EmbeddingModel> = {
  error: unknown;
  result?: undefined;
  model: MODEL;
+ /**
+ * The call options used for this attempt.
+ */
+ options: MODEL extends LanguageModel ? LanguageModelCallOptions : EmbeddingModelCallOptions;
  };
  /**
  * A retry attempt with a successful result
@@ -52,11 +41,38 @@ type RetryResultAttempt = {
  result: LanguageModelGenerate;
  error?: undefined;
  model: LanguageModel;
+ /**
+ * The call options used for this attempt.
+ */
+ options: LanguageModelCallOptions;
  };
  /**
  * A retry attempt with either an error or a result and the model used
  */
  type RetryAttempt<MODEL extends LanguageModel | EmbeddingModel> = RetryErrorAttempt<MODEL> | RetryResultAttempt;
+ /**
+ * The context provided to Retryables with the current attempt and all previous attempts.
+ */
+ type RetryContext<MODEL extends ResolvableLanguageModel | EmbeddingModel> = {
+ /**
+ * Current attempt that caused the retry
+ */
+ current: RetryAttempt<ResolvedModel<MODEL>>;
+ /**
+ * All attempts made so far, including the current one
+ */
+ attempts: Array<RetryAttempt<ResolvedModel<MODEL>>>;
+ };
+ /**
+ * Options for creating a retryable model.
+ */
+ interface RetryableModelOptions<MODEL extends LanguageModel | EmbeddingModel> {
+ model: MODEL;
+ retries: Retries<MODEL>;
+ disabled?: boolean | (() => boolean);
+ onError?: (context: RetryContext<MODEL>) => void;
+ onRetry?: (context: RetryContext<MODEL>) => void;
+ }
  /**
  * A model to retry with and the maximum number of attempts for that model.
  *
@@ -70,11 +86,34 @@ type RetryAttempt<MODEL extends LanguageModel | EmbeddingModel> = RetryErrorAtte
  */
  type Retry<MODEL extends ResolvableLanguageModel | EmbeddingModel> = {
  model: MODEL;
+ /**
+ * Maximum number of attempts for this model.
+ */
  maxAttempts?: number;
+ /**
+ * Delay in milliseconds before retrying.
+ */
  delay?: number;
+ /**
+ * Factor to multiply the delay by for exponential backoff.
+ */
  backoffFactor?: number;
- providerOptions?: ProviderOptions;
+ /**
+ * Timeout in milliseconds for the retry request.
+ * Creates a new AbortSignal with this timeout.
+ */
  timeout?: number;
+ /**
+ * Call options to override for this retry.
+ */
+ options?: MODEL extends LanguageModel ? Partial<LanguageModelRetryCallOptions> : Partial<EmbeddingModelRetryCallOptions>;
+ /**
+ * @deprecated Use `options.providerOptions` instead.
+ * Provider options to override for this retry.
+ * If both `providerOptions` and `options.providerOptions` are set,
+ * `options.providerOptions` takes precedence.
+ */
+ providerOptions?: SharedV3ProviderOptions;
  };
  /**
  * A function that determines whether to retry with a different model based on the current attempt and all previous attempts.
@@ -84,7 +123,7 @@ type Retries<MODEL extends LanguageModel | EmbeddingModel> = Array<Retryable<Res
  type RetryableOptions<MODEL extends ResolvableLanguageModel | EmbeddingModel> = Partial<Omit<Retry<MODEL>, 'model'>>;
  type LanguageModelGenerate = Awaited<ReturnType<LanguageModel['doGenerate']>>;
  type LanguageModelStream = Awaited<ReturnType<LanguageModel['doStream']>>;
- type EmbeddingModelCallOptions<VALUE> = Parameters<EmbeddingModel<VALUE>['doEmbed']>[0];
- type EmbeddingModelEmbed<VALUE = any> = Awaited<ReturnType<EmbeddingModel<VALUE>['doEmbed']>>;
+ type EmbeddingModelCallOptions = Parameters<EmbeddingModel['doEmbed']>[0];
+ type EmbeddingModelEmbed = Awaited<ReturnType<EmbeddingModel['doEmbed']>>;
  //#endregion
- export { RetryableOptions as S, RetryContext as _, LanguageModel as a, Retryable as b, LanguageModelStream as c, ResolvableLanguageModel as d, ResolvableModel as f, RetryAttempt as g, Retry as h, GatewayLanguageModelId as i, LanguageModelStreamPart as l, Retries as m, EmbeddingModelCallOptions as n, LanguageModelCallOptions as o, ResolvedModel as p, EmbeddingModelEmbed as r, LanguageModelGenerate as s, EmbeddingModel as t, ProviderOptions as u, RetryErrorAttempt as v, RetryableModelOptions as x, RetryResultAttempt as y };
+ export { RetryableModelOptions as C, Retryable as S, Retry as _, GatewayLanguageModelId as a, RetryErrorAttempt as b, LanguageModelGenerate as c, LanguageModelStreamPart as d, ProviderOptions as f, Retries as g, ResolvedModel as h, EmbeddingModelRetryCallOptions as i, LanguageModelRetryCallOptions as l, ResolvableModel as m, EmbeddingModelCallOptions as n, LanguageModel as o, ResolvableLanguageModel as p, EmbeddingModelEmbed as r, LanguageModelCallOptions as s, EmbeddingModel as t, LanguageModelStream as u, RetryAttempt as v, RetryableOptions as w, RetryResultAttempt as x, RetryContext as y };
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "ai-retry",
- "version": "1.0.0-beta.1",
+ "version": "1.0.0-beta.2",
  "description": "AI SDK Retry",
  "main": "./dist/index.mjs",
  "module": "./dist/index.mjs",
@@ -33,30 +33,30 @@
  "ai": "6.x"
  },
  "devDependencies": {
- "@ai-sdk/anthropic": "3.0.0-beta.59",
- "@ai-sdk/azure": "3.0.0-beta.68",
- "@ai-sdk/gateway": "2.0.0-beta.66",
- "@ai-sdk/groq": "3.0.0-beta.36",
- "@ai-sdk/openai": "3.0.0-beta.66",
+ "@ai-sdk/anthropic": "3.0.0-beta.80",
+ "@ai-sdk/azure": "3.0.0-beta.91",
+ "@ai-sdk/gateway": "2.0.18",
+ "@ai-sdk/groq": "3.0.0-beta.48",
+ "@ai-sdk/openai": "3.0.0-beta.89",
  "@ai-sdk/test-server": "1.0.0-beta.1",
  "@arethetypeswrong/cli": "^0.18.2",
- "@biomejs/biome": "^2.3.7",
+ "@biomejs/biome": "^2.3.8",
  "@total-typescript/tsconfig": "^1.0.4",
- "@types/node": "^24.10.1",
- "ai": "6.0.0-beta.116",
+ "@types/node": "^24.10.2",
+ "ai": "6.0.0-beta.139",
  "husky": "^9.1.7",
- "msw": "^2.12.3",
- "pkg-pr-new": "^0.0.60",
- "publint": "^0.3.15",
- "tsdown": "^0.16.7",
- "tsx": "^4.20.6",
- "typescript": "^5.9.2",
- "vitest": "^4.0.14",
+ "msw": "^2.12.4",
+ "pkg-pr-new": "^0.0.62",
+ "publint": "^0.3.16",
+ "tsdown": "^0.17.2",
+ "tsx": "^4.21.0",
+ "typescript": "^5.9.3",
+ "vitest": "^4.0.15",
  "zod": "^4.1.13"
  },
  "dependencies": {
- "@ai-sdk/provider": "3.0.0-beta.17",
- "@ai-sdk/provider-utils": "4.0.0-beta.34"
+ "@ai-sdk/provider": "3.0.0-beta.26",
+ "@ai-sdk/provider-utils": "4.0.0-beta.45"
  },
  "scripts": {
  "build": "tsdown",