ai-retry 0.5.1 → 0.6.0

package/README.md CHANGED
@@ -331,6 +331,41 @@ const retryableModel = createRetryable({
 
  The attempts are counted per unique model (provider + modelId). That means if multiple retryables return the same model, it won't be retried again once the `maxAttempts` is reached.
 
+ #### Provider Options
+
+ You can override provider-specific options for each retry attempt. This is useful when you want to use different configurations for fallback models.
+
+ ```typescript
+ const retryableModel = createRetryable({
+   model: openai('gpt-5'),
+   retries: [
+     // Use different provider options for the retry
+     () => ({
+       model: openai('gpt-4o-2024-08-06'),
+       providerOptions: {
+         openai: {
+           user: 'fallback-user',
+           structuredOutputs: false,
+         },
+       },
+     }),
+   ],
+ });
+
+ // Original provider options are used for the first attempt
+ const result = await generateText({
+   model: retryableModel,
+   prompt: 'Write a story',
+   providerOptions: {
+     openai: {
+       user: 'primary-user',
+     },
+   },
+ });
+ ```
+
+ The retry's `providerOptions` will completely replace the original ones during retry attempts. This works for all model types (language and embedding) and all operations (generate, stream, embed).
+
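Because the retry's `providerOptions` completely replace the originals rather than merging with them, any original option that should also apply to a fallback attempt has to be repeated in the retry configuration. A minimal sketch of that behaviour, reusing the setup from the example above and assuming the usual imports from `ai`, `@ai-sdk/openai`, and `ai-retry`:

```typescript
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';
import { createRetryable } from 'ai-retry';

const retryableModel = createRetryable({
  model: openai('gpt-5'),
  retries: [
    () => ({
      model: openai('gpt-4o-2024-08-06'),
      providerOptions: {
        openai: {
          // Repeat `user` here if the fallback attempt should keep it;
          // the original providerOptions are replaced, not merged.
          user: 'primary-user',
          structuredOutputs: false,
        },
      },
    }),
  ],
});

const result = await generateText({
  model: retryableModel,
  prompt: 'Write a story',
  // Applied to the first attempt only.
  providerOptions: {
    openai: { user: 'primary-user' },
  },
});
```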
  #### Logging
 
  You can use the following callbacks to log retry attempts and errors:
@@ -401,14 +436,15 @@ type Retryable = (
 
  #### `Retry`
 
- A `Retry` specifies the model to retry and optional settings like `maxAttempts`, `delay` and `backoffFactor`.
+ A `Retry` specifies the model to retry and optional settings like `maxAttempts`, `delay`, `backoffFactor`, and `providerOptions`.
 
  ```typescript
  interface Retry {
    model: LanguageModelV2 | EmbeddingModelV2;
-   maxAttempts?: number; // Maximum retry attempts per model (default: 1)
-   delay?: number; // Delay in milliseconds before retrying
-   backoffFactor?: number; // Multiplier for exponential backoff
+   maxAttempts?: number; // Maximum retry attempts per model (default: 1)
+   delay?: number; // Delay in milliseconds before retrying
+   backoffFactor?: number; // Multiplier for exponential backoff
+   providerOptions?: ProviderOptions; // Provider-specific options for the retry
  }
  ```
 
@@ -417,6 +453,7 @@ interface Retry {
  - `maxAttempts`: Maximum number of times this model can be retried. Default is 1.
  - `delay`: Delay in milliseconds to wait before retrying. The delay respects abort signals from the request.
  - `backoffFactor`: Multiplier for exponential backoff (`delay × backoffFactor^attempt`). If not provided, uses fixed delay.
+ - `providerOptions`: Provider-specific options that override the original request's provider options during retry attempts.
 
  #### `RetryContext`
 
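As a rough sketch of how the `Retry` fields documented above combine (model IDs and option values are placeholders), a retryable can return a fallback model together with its retry settings; with `backoffFactor` set, the wait before each additional attempt of that model grows as `delay × backoffFactor^attempt`:

```typescript
import { openai } from '@ai-sdk/openai';
import { createRetryable } from 'ai-retry';

const retryableModel = createRetryable({
  model: openai('gpt-5'),
  retries: [
    () => ({
      model: openai('gpt-4o-2024-08-06'),
      maxAttempts: 3, // this fallback model may be retried up to 3 times
      delay: 1000, // base delay in milliseconds
      backoffFactor: 2, // exponential backoff: delay × 2^attempt
      providerOptions: {
        openai: { user: 'fallback-user' },
      },
    }),
  ],
});
```

Omitting `backoffFactor` keeps the 1000 ms delay fixed between attempts.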
package/dist/index.d.ts CHANGED
@@ -1,4 +1,4 @@
- import { a as LanguageModelV2Generate, c as Retry, d as RetryErrorAttempt, f as RetryResultAttempt, h as RetryableOptions, i as LanguageModelV2, l as RetryAttempt, m as RetryableModelOptions, n as EmbeddingModelV2CallOptions, o as LanguageModelV2Stream, p as Retryable, r as EmbeddingModelV2Embed, s as Retries, t as EmbeddingModelV2, u as RetryContext } from "./types-DqwAmcZS.js";
+ import { a as LanguageModelV2Generate, c as Retry, d as RetryErrorAttempt, f as RetryResultAttempt, h as RetryableOptions, i as LanguageModelV2, l as RetryAttempt, m as RetryableModelOptions, n as EmbeddingModelV2CallOptions, o as LanguageModelV2Stream, p as Retryable, r as EmbeddingModelV2Embed, s as Retries, t as EmbeddingModelV2, u as RetryContext } from "./types-DhGbwiB4.js";
  import * as _ai_sdk_provider0 from "@ai-sdk/provider";
  import { LanguageModelV2 as LanguageModelV2$1, LanguageModelV2StreamPart } from "@ai-sdk/provider";
 
package/dist/index.js CHANGED
@@ -109,6 +109,10 @@ var RetryableEmbeddingModel = class {
  * Track all attempts.
  */
  const attempts = input.attempts ?? [];
+ /**
+  * Track current retry configuration.
+  */
+ let currentRetry;
  while (true) {
  /**
  * The previous attempt that triggered a retry, or undefined if this is the first attempt
@@ -130,7 +134,7 @@ var RetryableEmbeddingModel = class {
  }
  try {
  return {
- result: await input.fn(),
+ result: await input.fn(currentRetry),
  attempts
  };
  } catch (error) {
@@ -149,6 +153,7 @@ var RetryableEmbeddingModel = class {
  await delay(calculateExponentialBackoff(retryModel.delay, retryModel.backoffFactor, modelAttemptsCount), { abortSignal: input.abortSignal });
  }
  this.currentModel = retryModel.model;
+ currentRetry = retryModel;
  }
  }
  }
@@ -190,7 +195,13 @@ var RetryableEmbeddingModel = class {
  */
  this.currentModel = this.baseModel;
  const { result } = await this.withRetry({
- fn: async () => await this.currentModel.doEmbed(options),
+ fn: async (currentRetry) => {
+   const callOptions = {
+     ...options,
+     providerOptions: currentRetry?.providerOptions ?? options.providerOptions
+   };
+   return this.currentModel.doEmbed(callOptions);
+ },
  abortSignal: options.abortSignal
  });
  return result;
@@ -226,6 +237,10 @@ var RetryableLanguageModel = class {
  * Track all attempts.
  */
  const attempts = input.attempts ?? [];
+ /**
+  * Track current retry configuration.
+  */
+ let currentRetry;
  while (true) {
  /**
  * The previous attempt that triggered a retry, or undefined if this is the first attempt
@@ -249,7 +264,7 @@ var RetryableLanguageModel = class {
  /**
  * Call the function that may need to be retried
  */
- const result = await input.fn();
+ const result = await input.fn(currentRetry);
  /**
  * Check if the result should trigger a retry (only for generate results, not streams)
  */
@@ -270,6 +285,7 @@ var RetryableLanguageModel = class {
  await delay(calculateExponentialBackoff(retryModel.delay, retryModel.backoffFactor, modelAttemptsCount), { abortSignal: input.abortSignal });
  }
  this.currentModel = retryModel.model;
+ currentRetry = retryModel;
  /**
  * Continue to the next iteration to retry
  */
@@ -292,6 +308,7 @@ var RetryableLanguageModel = class {
  await delay(calculateExponentialBackoff(retryModel.delay, retryModel.backoffFactor, modelAttemptsCount), { abortSignal: input.abortSignal });
  }
  this.currentModel = retryModel.model;
+ currentRetry = retryModel;
  }
  }
  }
@@ -351,7 +368,13 @@ var RetryableLanguageModel = class {
  */
  this.currentModel = this.baseModel;
  const { result } = await this.withRetry({
- fn: async () => await this.currentModel.doGenerate(options),
+ fn: async (currentRetry) => {
+   const callOptions = {
+     ...options,
+     providerOptions: currentRetry?.providerOptions ?? options.providerOptions
+   };
+   return this.currentModel.doGenerate(callOptions);
+ },
  abortSignal: options.abortSignal
  });
  return result;
@@ -365,7 +388,13 @@ var RetryableLanguageModel = class {
  * Perform the initial call to doStream with retry logic to handle errors before any data is streamed.
  */
  let { result, attempts } = await this.withRetry({
- fn: async () => await this.currentModel.doStream(options),
+ fn: async (currentRetry) => {
+   const callOptions = {
+     ...options,
+     providerOptions: currentRetry?.providerOptions ?? options.providerOptions
+   };
+   return this.currentModel.doStream(callOptions);
+ },
  abortSignal: options.abortSignal
  });
  /**
@@ -421,7 +450,13 @@ var RetryableLanguageModel = class {
  * This will create a new stream.
  */
  const retriedResult = await this.withRetry({
- fn: async () => await this.currentModel.doStream(options),
+ fn: async () => {
+   const callOptions = {
+     ...options,
+     providerOptions: retryModel.providerOptions ?? options.providerOptions
+   };
+   return this.currentModel.doStream(callOptions);
+ },
  attempts,
  abortSignal: options.abortSignal
  });
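Across `doEmbed`, `doGenerate`, and `doStream`, the change above follows one pattern: the retry loop passes the active retry configuration into the call function, which prefers that retry's `providerOptions` and otherwise falls back to the original request's. A stripped-down sketch of that fallback, using simplified stand-in types rather than the package's internal ones:

```typescript
// Simplified stand-ins for the package's internal types.
type ProviderOptions = Record<string, Record<string, unknown>>;
interface RetryConfig { providerOptions?: ProviderOptions }
interface CallOptions { prompt?: unknown; providerOptions?: ProviderOptions }

// Build the options for one attempt: a retry's providerOptions replace the
// original ones entirely; if the retry defines none, the originals are kept.
function optionsForAttempt(options: CallOptions, currentRetry?: RetryConfig): CallOptions {
  return {
    ...options,
    providerOptions: currentRetry?.providerOptions ?? options.providerOptions,
  };
}

// First attempt: no retry config yet, so the original options pass through.
const first = optionsForAttempt({ providerOptions: { openai: { user: 'primary-user' } } });

// Retry attempt with its own providerOptions: the originals are replaced wholesale.
const retried = optionsForAttempt(
  { providerOptions: { openai: { user: 'primary-user' } } },
  { providerOptions: { openai: { user: 'fallback-user' } } },
);
```

The one place the retry config is read directly is the mid-stream retry path, where `retryModel.providerOptions` is used because the retry that triggered the new stream is already in scope.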
@@ -1,4 +1,4 @@
- import { h as RetryableOptions, i as LanguageModelV2, p as Retryable, t as EmbeddingModelV2 } from "../types-DqwAmcZS.js";
+ import { h as RetryableOptions, i as LanguageModelV2, p as Retryable, t as EmbeddingModelV2 } from "../types-DhGbwiB4.js";
 
  //#region src/retryables/content-filter-triggered.d.ts
 
@@ -1,3 +1,4 @@
+ import { ProviderOptions } from "@ai-sdk/provider-utils";
  import { EmbeddingModelV2, LanguageModelV2 as LanguageModelV2$1 } from "@ai-sdk/provider";
 
  //#region src/types.d.ts
@@ -54,6 +55,7 @@ type Retry<MODEL extends LanguageModelV2$1 | EmbeddingModelV2$1> = {
  maxAttempts?: number;
  delay?: number;
  backoffFactor?: number;
+ providerOptions?: ProviderOptions;
  };
  /**
  * A function that determines whether to retry with a different model based on the current attempt and all previous attempts.
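The `providerOptions?: ProviderOptions` field added above reuses the AI SDK's shared `ProviderOptions` type from `@ai-sdk/provider-utils`, which, to my understanding of AI SDK 5, is a provider-keyed record of JSON-serializable option values. A small illustrative value:

```typescript
import type { ProviderOptions } from '@ai-sdk/provider-utils';

// A provider-keyed bag of options, e.g. the fallback options from the README example.
const fallbackOptions: ProviderOptions = {
  openai: {
    user: 'fallback-user',
    structuredOutputs: false,
  },
};
```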
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "ai-retry",
- "version": "0.5.1",
+ "version": "0.6.0",
  "description": "AI SDK Retry",
  "main": "./dist/index.js",
  "module": "./dist/index.js",
@@ -36,7 +36,7 @@
  "@ai-sdk/anthropic": "^2.0.18",
  "@ai-sdk/azure": "^2.0.30",
  "@ai-sdk/groq": "^2.0.24",
- "@ai-sdk/openai": "^2.0.30",
+ "@ai-sdk/openai": "^2.0.53",
  "@arethetypeswrong/cli": "^0.18.2",
  "@biomejs/biome": "^2.2.4",
  "@total-typescript/tsconfig": "^1.0.4",