@ai-sdk/openai 2.0.66 → 2.0.67

package/CHANGELOG.md CHANGED
@@ -1,5 +1,11 @@
 # @ai-sdk/openai
 
+## 2.0.67
+
+### Patch Changes
+
+- 3735f5a: feat(openai): Add support for 'promptCacheRetention: 24h' for gpt5.1 series
+
 ## 2.0.66
 
 ### Patch Changes
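The new option is passed through the AI SDK's per-provider options. A minimal sketch of opting in, assuming `generateText` from the `ai` package and a hypothetical `gpt-5.1` model id (any 5.1 series model should apply, per the changeset):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// 'gpt-5.1' is a placeholder for any 5.1 series model id.
const { text } = await generateText({
  model: openai('gpt-5.1'),
  prompt: 'Summarize the release notes.',
  providerOptions: {
    openai: {
      // New in 2.0.67: retain cached prompt prefixes for up to 24 hours.
      // Omitting the option keeps the default 'in_memory' behavior.
      promptCacheRetention: '24h',
    },
  },
});
```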
package/dist/index.d.mts CHANGED
@@ -18,6 +18,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazyValidat
     strictJsonSchema?: boolean | undefined;
     textVerbosity?: "low" | "medium" | "high" | undefined;
     promptCacheKey?: string | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | undefined;
     safetyIdentifier?: string | undefined;
 }>;
 type OpenAIChatLanguageModelOptions = InferValidator<typeof openaiChatLanguageModelOptions>;
@@ -332,6 +333,7 @@ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyV
     parallelToolCalls?: boolean | null | undefined;
     previousResponseId?: string | null | undefined;
     promptCacheKey?: string | null | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | null | undefined;
     reasoningEffort?: string | null | undefined;
     reasoningSummary?: string | null | undefined;
     safetyIdentifier?: string | null | undefined;
package/dist/index.d.ts CHANGED
@@ -18,6 +18,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazyValidat
     strictJsonSchema?: boolean | undefined;
     textVerbosity?: "low" | "medium" | "high" | undefined;
     promptCacheKey?: string | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | undefined;
     safetyIdentifier?: string | undefined;
 }>;
 type OpenAIChatLanguageModelOptions = InferValidator<typeof openaiChatLanguageModelOptions>;
@@ -332,6 +333,7 @@ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyV
     parallelToolCalls?: boolean | null | undefined;
     previousResponseId?: string | null | undefined;
     promptCacheKey?: string | null | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | null | undefined;
     reasoningEffort?: string | null | undefined;
     reasoningSummary?: string | null | undefined;
     safetyIdentifier?: string | null | undefined;
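Both declaration files gain the same optional field; only the Responses variant is additionally nullable. A standalone sketch of the runtime check this corresponds to, using plain `zod` in place of the bundle's versioned internal import:

```ts
import { z } from 'zod';

// Mirrors the schema added in dist/index.js below.
const promptCacheRetention = z.enum(['in_memory', '24h']).optional();

promptCacheRetention.parse('24h');      // ok
promptCacheRetention.parse(undefined);  // ok: the field is optional
promptCacheRetention.safeParse('7d');   // { success: false }: not a permitted value
```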
package/dist/index.js CHANGED
@@ -483,6 +483,15 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazyValidator)(
      * Useful for improving cache hit rates and working around automatic caching issues.
      */
     promptCacheKey: import_v43.z.string().optional(),
+    /**
+     * The retention policy for the prompt cache.
+     * - 'in_memory': Default. Standard prompt caching behavior.
+     * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+     * Currently only available for 5.1 series models.
+     *
+     * @default 'in_memory'
+     */
+    promptCacheRetention: import_v43.z.enum(["in_memory", "24h"]).optional(),
     /**
      * A stable identifier used to help detect users of your application
      * that may be violating OpenAI's usage policies. The IDs should be a
@@ -649,6 +658,7 @@ var OpenAIChatLanguageModel = class {
       reasoning_effort: openaiOptions.reasoningEffort,
       service_tier: openaiOptions.serviceTier,
       prompt_cache_key: openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions.safetyIdentifier,
       // messages:
       messages
@@ -2920,6 +2930,15 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazyValid
     parallelToolCalls: import_v417.z.boolean().nullish(),
     previousResponseId: import_v417.z.string().nullish(),
     promptCacheKey: import_v417.z.string().nullish(),
+    /**
+     * The retention policy for the prompt cache.
+     * - 'in_memory': Default. Standard prompt caching behavior.
+     * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+     * Currently only available for 5.1 series models.
+     *
+     * @default 'in_memory'
+     */
+    promptCacheRetention: import_v417.z.enum(["in_memory", "24h"]).nullish(),
     reasoningEffort: import_v417.z.string().nullish(),
     reasoningSummary: import_v417.z.string().nullish(),
     safetyIdentifier: import_v417.z.string().nullish(),
@@ -3203,6 +3222,7 @@ var OpenAIResponsesLanguageModel = class {
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
       include,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
@@ -4464,7 +4484,7 @@ var OpenAITranscriptionModel = class {
 };
 
 // src/version.ts
-var VERSION = true ? "2.0.66" : "0.0.0-test";
+var VERSION = true ? "2.0.67" : "0.0.0-test";
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {
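End to end, the camelCase provider option is forwarded as a snake_case request field on both the Chat Completions and Responses paths. A sketch against the Responses path, assuming `streamText` from `ai`; the model id, cache key, and prompt are placeholders:

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai.responses('gpt-5.1'), // placeholder 5.1 series model id
  prompt: 'Continue the conversation.',
  providerOptions: {
    openai: {
      promptCacheKey: 'session-1234', // existing option: stabilizes cache routing
      promptCacheRetention: '24h',    // new option: extended 24-hour retention
    },
  },
});

// Per the mapping above, the serialized request body carries:
//   "prompt_cache_key": "session-1234",
//   "prompt_cache_retention": "24h"
```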