@ai-sdk/openai 3.0.0-beta.58 → 3.0.0-beta.60

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -477,6 +477,15 @@ var openaiChatLanguageModelOptions = lazySchema2(
        * Useful for improving cache hit rates and working around automatic caching issues.
        */
       promptCacheKey: z3.string().optional(),
+      /**
+       * The retention policy for the prompt cache.
+       * - 'in_memory': Default. Standard prompt caching behavior.
+       * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+       *   Currently only available for 5.1 series models.
+       *
+       * @default 'in_memory'
+       */
+      promptCacheRetention: z3.enum(["in_memory", "24h"]).optional(),
       /**
        * A stable identifier used to help detect users of your application
        * that may be violating OpenAI's usage policies. The IDs should be a
@@ -645,6 +654,7 @@ var OpenAIChatLanguageModel = class {
       reasoning_effort: openaiOptions.reasoningEffort,
       service_tier: openaiOptions.serviceTier,
       prompt_cache_key: openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions.safetyIdentifier,
       // messages:
       messages
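
Taken together, the two hunks above wire the new chat option end to end: the options schema accepts `promptCacheRetention`, and the chat model forwards it to OpenAI as `prompt_cache_retention`. A minimal usage sketch via the AI SDK's `providerOptions` passthrough; the model id and cache key are placeholders:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  // Placeholder model id; per the doc comment above, '24h' retention is
  // currently limited to 5.1 series models.
  model: openai.chat('gpt-5.1'),
  prompt: 'Summarize the refund policy.',
  providerOptions: {
    openai: {
      promptCacheKey: 'refund-policy-v1', // hypothetical cache key
      promptCacheRetention: '24h', // new in this release; defaults to 'in_memory'
    },
  },
});
```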
@@ -3121,6 +3131,7 @@ var openaiResponsesModelIds = [
 var openaiResponsesProviderOptionsSchema = lazySchema13(
   () => zodSchema13(
     z15.object({
+      conversation: z15.string().nullish(),
       include: z15.array(
         z15.enum([
           "reasoning.encrypted_content",
@@ -3153,6 +3164,15 @@ var openaiResponsesProviderOptionsSchema = lazySchema13(
       parallelToolCalls: z15.boolean().nullish(),
       previousResponseId: z15.string().nullish(),
       promptCacheKey: z15.string().nullish(),
+      /**
+       * The retention policy for the prompt cache.
+       * - 'in_memory': Default. Standard prompt caching behavior.
+       * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+       *   Currently only available for 5.1 series models.
+       *
+       * @default 'in_memory'
+       */
+      promptCacheRetention: z15.enum(["in_memory", "24h"]).nullish(),
       reasoningEffort: z15.string().nullish(),
       reasoningSummary: z15.string().nullish(),
       safetyIdentifier: z15.string().nullish(),
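
Note that the Responses schema uses `.nullish()` rather than the chat schema's `.optional()`, so callers may pass a value, `null`, or nothing at all. A reduced stand-alone sketch of just the new fields (the real schema has many more keys):

```ts
import { z } from 'zod';

// Reduced sketch of the two fields added in this release.
const newResponsesOptions = z.object({
  conversation: z.string().nullish(),
  promptCacheRetention: z.enum(['in_memory', '24h']).nullish(),
});

newResponsesOptions.parse({ promptCacheRetention: '24h' }); // ok
newResponsesOptions.parse({ conversation: null }); // ok: nullish allows null
newResponsesOptions.parse({ promptCacheRetention: 'week' }); // throws: not in the enum
```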
@@ -3742,6 +3762,13 @@ var OpenAIResponsesLanguageModel = class {
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
+    if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "conversation",
+        details: "conversation and previousResponseId cannot be used together"
+      });
+    }
     const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
       systemMessageMode: modelConfig.systemMessageMode,
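
`conversation` and `previousResponseId` are mutually exclusive ways of threading state, so supplying both now records a call warning rather than failing client-side (the hunks below still forward `conversation` unchanged, so the API itself may reject the combination). A sketch of how the warning surfaces, assuming `generateText` exposes call warnings on `result.warnings`:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai.responses('gpt-5.1'), // placeholder model id
  prompt: 'Hello again.',
  providerOptions: {
    openai: {
      conversation: 'conv_abc123', // placeholder ids; setting both
      previousResponseId: 'resp_def456', // options triggers the warning
    },
  },
});

console.log(result.warnings);
// [{ type: 'unsupported-setting', setting: 'conversation',
//    details: 'conversation and previousResponseId cannot be used together' }]
```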
@@ -3804,6 +3831,7 @@ var OpenAIResponsesLanguageModel = class {
         }
       },
       // provider options:
+      conversation: openaiOptions == null ? void 0 : openaiOptions.conversation,
       max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
       metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
       parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
@@ -3814,6 +3842,7 @@ var OpenAIResponsesLanguageModel = class {
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
       include,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
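
For reference, the camelCase provider options map one-to-one onto snake_case fields in the outgoing Responses API body. A sketch of the relevant slice, with field names taken from the diff and placeholder values:

```ts
// Slice of the request body the provider assembles from the options above.
const bodySlice = {
  conversation: 'conv_abc123',
  prompt_cache_key: 'refund-policy-v1',
  prompt_cache_retention: '24h',
  safety_identifier: 'user-1234',
};
```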