@ai-sdk/openai 3.0.0-beta.58 → 3.0.0-beta.60

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
 # @ai-sdk/openai
 
+## 3.0.0-beta.60
+
+### Patch Changes
+
+- 0877683: feat(provider/openai): support conversations api
+
+## 3.0.0-beta.59
+
+### Patch Changes
+
+- d0f1baf: feat(openai): Add support for 'promptCacheRetention: 24h' for gpt5.1 series
+
 ## 3.0.0-beta.58
 
 ### Patch Changes
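Taken together, beta.59 and beta.60 surface two new provider options: `promptCacheRetention` (Chat Completions and Responses APIs) and `conversation` (Responses API only). A minimal usage sketch, assuming the standard AI SDK `generateText` call shape; the conversation id is a hypothetical placeholder:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// 'conv_abc123' is a hypothetical placeholder; a real id would come from
// OpenAI's Conversations API.
const result = await generateText({
  model: openai.responses('gpt-5.1'),
  prompt: 'Summarize our discussion so far.',
  providerOptions: {
    openai: {
      // beta.60: anchor the request to a server-side conversation.
      conversation: 'conv_abc123',
      // beta.59: keep cached prompt prefixes alive for up to 24 hours
      // (per the option docs below, gpt-5.1 series models only).
      promptCacheRetention: '24h',
    },
  },
});

console.log(result.text);
```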
package/dist/index.d.mts CHANGED
@@ -19,6 +19,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<
     strictJsonSchema?: boolean | undefined;
     textVerbosity?: "low" | "medium" | "high" | undefined;
     promptCacheKey?: string | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | undefined;
     safetyIdentifier?: string | undefined;
 }>;
 type OpenAIChatLanguageModelOptions = InferSchema<typeof openaiChatLanguageModelOptions>;
@@ -381,6 +382,7 @@ declare const openaiTools: {
 
 type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
 declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
+    conversation?: string | null | undefined;
     include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
     instructions?: string | null | undefined;
     logprobs?: number | boolean | undefined;
@@ -389,6 +391,7 @@ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyS
     parallelToolCalls?: boolean | null | undefined;
     previousResponseId?: string | null | undefined;
     promptCacheKey?: string | null | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | null | undefined;
     reasoningEffort?: string | null | undefined;
     reasoningSummary?: string | null | undefined;
     safetyIdentifier?: string | null | undefined;
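The same `promptCacheRetention` values also appear in the Chat Completions options above (there without the `null` variant). A sketch of the chat-model usage, assuming the usual `providerOptions.openai` pattern; the cache key value is illustrative:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai.chat('gpt-5.1'),
  prompt: 'Explain prompt caching in one paragraph.',
  providerOptions: {
    openai: {
      promptCacheKey: 'docs-assistant-v1', // illustrative stable cache key
      promptCacheRetention: '24h',         // extended 24-hour retention
    },
  },
});
```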
package/dist/index.d.ts CHANGED
@@ -19,6 +19,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<
     strictJsonSchema?: boolean | undefined;
     textVerbosity?: "low" | "medium" | "high" | undefined;
     promptCacheKey?: string | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | undefined;
     safetyIdentifier?: string | undefined;
 }>;
 type OpenAIChatLanguageModelOptions = InferSchema<typeof openaiChatLanguageModelOptions>;
@@ -381,6 +382,7 @@ declare const openaiTools: {
 
 type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
 declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
+    conversation?: string | null | undefined;
     include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
     instructions?: string | null | undefined;
     logprobs?: number | boolean | undefined;
@@ -389,6 +391,7 @@ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyS
     parallelToolCalls?: boolean | null | undefined;
     previousResponseId?: string | null | undefined;
     promptCacheKey?: string | null | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | null | undefined;
     reasoningEffort?: string | null | undefined;
     reasoningSummary?: string | null | undefined;
     safetyIdentifier?: string | null | undefined;
package/dist/index.js CHANGED
@@ -496,6 +496,15 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazySchema)(
      * Useful for improving cache hit rates and working around automatic caching issues.
      */
     promptCacheKey: import_v43.z.string().optional(),
+    /**
+     * The retention policy for the prompt cache.
+     * - 'in_memory': Default. Standard prompt caching behavior.
+     * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+     * Currently only available for 5.1 series models.
+     *
+     * @default 'in_memory'
+     */
+    promptCacheRetention: import_v43.z.enum(["in_memory", "24h"]).optional(),
     /**
      * A stable identifier used to help detect users of your application
      * that may be violating OpenAI's usage policies. The IDs should be a
@@ -662,6 +671,7 @@ var OpenAIChatLanguageModel = class {
       reasoning_effort: openaiOptions.reasoningEffort,
       service_tier: openaiOptions.serviceTier,
       prompt_cache_key: openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions.safetyIdentifier,
       // messages:
       messages
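As the hunk above shows, the camelCase provider option is forwarded as the snake_case `prompt_cache_retention` field on the Chat Completions request body. An illustrative sketch of the resulting payload; all values are assumed:

```ts
// Illustrative request body after the mapping above; all values assumed.
const body = {
  model: 'gpt-5.1',
  service_tier: 'auto',
  prompt_cache_key: 'docs-assistant-v1',
  prompt_cache_retention: '24h', // new field, omitted when the option is unset
  safety_identifier: 'user-1234',
  messages: [{ role: 'user', content: 'Hello!' }],
};
```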
@@ -3147,6 +3157,7 @@ var openaiResponsesModelIds = [
 var openaiResponsesProviderOptionsSchema = (0, import_provider_utils23.lazySchema)(
   () => (0, import_provider_utils23.zodSchema)(
     import_v418.z.object({
+      conversation: import_v418.z.string().nullish(),
       include: import_v418.z.array(
         import_v418.z.enum([
           "reasoning.encrypted_content",
@@ -3179,6 +3190,15 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils23.lazySchem
       parallelToolCalls: import_v418.z.boolean().nullish(),
       previousResponseId: import_v418.z.string().nullish(),
       promptCacheKey: import_v418.z.string().nullish(),
+      /**
+       * The retention policy for the prompt cache.
+       * - 'in_memory': Default. Standard prompt caching behavior.
+       * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+       * Currently only available for 5.1 series models.
+       *
+       * @default 'in_memory'
+       */
+      promptCacheRetention: import_v418.z.enum(["in_memory", "24h"]).nullish(),
       reasoningEffort: import_v418.z.string().nullish(),
       reasoningSummary: import_v418.z.string().nullish(),
       safetyIdentifier: import_v418.z.string().nullish(),
@@ -3421,6 +3441,13 @@ var OpenAIResponsesLanguageModel = class {
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
+    if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "conversation",
+        details: "conversation and previousResponseId cannot be used together"
+      });
+    }
     const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
       systemMessageMode: modelConfig.systemMessageMode,
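Note that the new guard emits an `unsupported-setting` warning rather than throwing, since `conversation` and `previousResponseId` are mutually exclusive ways of anchoring a request. A sketch of the conflicting call, with hypothetical ids:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai.responses('gpt-5.1'),
  prompt: 'Continue.',
  providerOptions: {
    openai: {
      conversation: 'conv_abc123',       // hypothetical id
      previousResponseId: 'resp_xyz789', // hypothetical id; conflicts with conversation
    },
  },
});

// The conflict surfaces as a call warning on the result, not an exception.
console.log(result.warnings);
```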
@@ -3483,6 +3510,7 @@ var OpenAIResponsesLanguageModel = class {
         }
       },
       // provider options:
+      conversation: openaiOptions == null ? void 0 : openaiOptions.conversation,
       max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
       metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
       parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
@@ -3493,6 +3521,7 @@ var OpenAIResponsesLanguageModel = class {
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
       include,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
@@ -4941,7 +4970,7 @@ var OpenAITranscriptionModel = class {
 };
 
 // src/version.ts
-var VERSION = true ? "3.0.0-beta.58" : "0.0.0-test";
+var VERSION = true ? "3.0.0-beta.60" : "0.0.0-test";
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {