@ai-sdk/openai 3.0.0-beta.58 → 3.0.0-beta.59

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -18,6 +18,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<
     strictJsonSchema?: boolean | undefined;
     textVerbosity?: "low" | "medium" | "high" | undefined;
     promptCacheKey?: string | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | undefined;
     safetyIdentifier?: string | undefined;
 }>;
 type OpenAIChatLanguageModelOptions = InferSchema<typeof openaiChatLanguageModelOptions>;
@@ -515,6 +515,15 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazySchema)(
      * Useful for improving cache hit rates and working around automatic caching issues.
      */
     promptCacheKey: import_v43.z.string().optional(),
+    /**
+     * The retention policy for the prompt cache.
+     * - 'in_memory': Default. Standard prompt caching behavior.
+     * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+     * Currently only available for 5.1 series models.
+     *
+     * @default 'in_memory'
+     */
+    promptCacheRetention: import_v43.z.enum(["in_memory", "24h"]).optional(),
     /**
      * A stable identifier used to help detect users of your application
      * that may be violating OpenAI's usage policies. The IDs should be a
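For context, the new option rides through the AI SDK's standard providerOptions passthrough. A minimal sketch of setting it on a chat model (the model id and cache key below are illustrative, not from this diff):

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch: pass the new option under providerOptions.openai.
const { text } = await generateText({
  model: openai.chat('gpt-5.1'), // '24h' is documented above for 5.1 series models
  providerOptions: {
    openai: {
      promptCacheKey: 'support-bot-system-prompt-v1', // stable key to improve cache hits
      promptCacheRetention: '24h', // new in beta.59; defaults to 'in_memory'
    },
  },
  prompt: 'Hello!',
});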
@@ -681,6 +690,7 @@ var OpenAIChatLanguageModel = class {
       reasoning_effort: openaiOptions.reasoningEffort,
       service_tier: openaiOptions.serviceTier,
       prompt_cache_key: openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions.safetyIdentifier,
       // messages:
       messages
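On the wire, the mapping above forwards the camelCase provider option as a snake_case field in the chat completions request body. A sketch of the relevant slice of the payload, assumed from the mapping (all values illustrative):

// Assumed shape of the outgoing request body slice, per the mapping above:
const body = {
  model: 'gpt-5.1',
  prompt_cache_key: 'support-bot-system-prompt-v1',
  prompt_cache_retention: '24h',
  safety_identifier: 'user-1234',
  messages: [{ role: 'user', content: 'Hello!' }],
};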
@@ -3137,6 +3147,15 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazySchem
     parallelToolCalls: import_v415.z.boolean().nullish(),
     previousResponseId: import_v415.z.string().nullish(),
     promptCacheKey: import_v415.z.string().nullish(),
+    /**
+     * The retention policy for the prompt cache.
+     * - 'in_memory': Default. Standard prompt caching behavior.
+     * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+     * Currently only available for 5.1 series models.
+     *
+     * @default 'in_memory'
+     */
+    promptCacheRetention: import_v415.z.enum(["in_memory", "24h"]).nullish(),
     reasoningEffort: import_v415.z.string().nullish(),
     reasoningSummary: import_v415.z.string().nullish(),
     safetyIdentifier: import_v415.z.string().nullish(),
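Note the schema difference between the two entry points: the chat options schema uses .optional() while the Responses schema uses .nullish(), so an explicit null also validates here. A small sketch with plain zod to illustrate:

import { z } from 'zod';

const retention = z.enum(['in_memory', '24h']).nullish();

retention.parse('24h');      // ok
retention.parse(null);       // ok: nullish accepts null
retention.parse(undefined);  // ok: nullish accepts undefined
// With .optional() instead of .nullish(), parse(null) would throw.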
@@ -3772,6 +3791,7 @@ var OpenAIResponsesLanguageModel = class {
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
       include,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
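The same providerOptions passthrough works for the Responses API model. A minimal sketch (model id illustrative; '24h' retention is documented above as limited to 5.1 series models):

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch: the option is forwarded as prompt_cache_retention on the
// Responses API request, per the mapping above.
const { text } = await generateText({
  model: openai.responses('gpt-5.1'),
  providerOptions: {
    openai: { promptCacheRetention: '24h' },
  },
  prompt: 'Summarize the release notes.',
});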