@ai-sdk/openai 2.0.66 → 2.0.68

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
 # @ai-sdk/openai
 
+## 2.0.68
+
+### Patch Changes
+
+- a4c6a12: feat(provider/openai): support conversations api
+
+## 2.0.67
+
+### Patch Changes
+
+- 3735f5a: feat(openai): Add support for 'promptCacheRetention: 24h' for gpt5.1 series
+
 ## 2.0.66
 
 ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -18,6 +18,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazyValidat
     strictJsonSchema?: boolean | undefined;
     textVerbosity?: "low" | "medium" | "high" | undefined;
     promptCacheKey?: string | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | undefined;
     safetyIdentifier?: string | undefined;
 }>;
 type OpenAIChatLanguageModelOptions = InferValidator<typeof openaiChatLanguageModelOptions>;
@@ -324,6 +325,7 @@ declare const openaiTools: {
 
 type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
 declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyValidator<{
+    conversation?: string | null | undefined;
     include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
     instructions?: string | null | undefined;
     logprobs?: number | boolean | undefined;
@@ -332,6 +334,7 @@ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyV
     parallelToolCalls?: boolean | null | undefined;
     previousResponseId?: string | null | undefined;
     promptCacheKey?: string | null | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | null | undefined;
     reasoningEffort?: string | null | undefined;
     reasoningSummary?: string | null | undefined;
     safetyIdentifier?: string | null | undefined;
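
The new `conversation` option in the typings above exposes OpenAI's Conversations API through the Responses provider options. A minimal sketch of how it might be set from application code, assuming the standard AI SDK `generateText` entry point and a placeholder conversation ID (`conv_123` is hypothetical):

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Sketch: attach this request to a server-side conversation instead of
// resending the full message history. 'conv_123' is a placeholder ID.
const result = await generateText({
  model: openai.responses('gpt-5.1'),
  prompt: 'Summarize our discussion so far.',
  providerOptions: {
    openai: {
      conversation: 'conv_123',
    },
  },
});
```

Since the schema marks the option as nullish, omitting it (or passing `null`) keeps the previous stateless behavior.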
package/dist/index.d.ts CHANGED
@@ -18,6 +18,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazyValidat
     strictJsonSchema?: boolean | undefined;
     textVerbosity?: "low" | "medium" | "high" | undefined;
     promptCacheKey?: string | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | undefined;
     safetyIdentifier?: string | undefined;
 }>;
 type OpenAIChatLanguageModelOptions = InferValidator<typeof openaiChatLanguageModelOptions>;
@@ -324,6 +325,7 @@ declare const openaiTools: {
 
 type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
 declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyValidator<{
+    conversation?: string | null | undefined;
     include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
     instructions?: string | null | undefined;
     logprobs?: number | boolean | undefined;
@@ -332,6 +334,7 @@ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyV
     parallelToolCalls?: boolean | null | undefined;
     previousResponseId?: string | null | undefined;
     promptCacheKey?: string | null | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | null | undefined;
     reasoningEffort?: string | null | undefined;
     reasoningSummary?: string | null | undefined;
     safetyIdentifier?: string | null | undefined;
package/dist/index.js CHANGED
@@ -483,6 +483,15 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazyValidator)(
      * Useful for improving cache hit rates and working around automatic caching issues.
      */
     promptCacheKey: import_v43.z.string().optional(),
+    /**
+     * The retention policy for the prompt cache.
+     * - 'in_memory': Default. Standard prompt caching behavior.
+     * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+     *   Currently only available for 5.1 series models.
+     *
+     * @default 'in_memory'
+     */
+    promptCacheRetention: import_v43.z.enum(["in_memory", "24h"]).optional(),
     /**
      * A stable identifier used to help detect users of your application
      * that may be violating OpenAI's usage policies. The IDs should be a
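
Given the schema comment above, here is a minimal sketch of opting into the extended 24-hour cache retention on a 5.1-series chat model (the model choice and prompt are illustrative, and `generateText` / `openai.chat` are the standard AI SDK entry points):

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Sketch: keep cached prompt prefixes active for up to 24 hours.
// Omitting promptCacheRetention falls back to the 'in_memory' default,
// and per the schema comment, '24h' is limited to 5.1 series models.
const { text } = await generateText({
  model: openai.chat('gpt-5.1'),
  prompt: 'Long, stable system prefix followed by a user question...',
  providerOptions: {
    openai: {
      promptCacheRetention: '24h',
    },
  },
});
```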
@@ -649,6 +658,7 @@ var OpenAIChatLanguageModel = class {
       reasoning_effort: openaiOptions.reasoningEffort,
       service_tier: openaiOptions.serviceTier,
       prompt_cache_key: openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions.safetyIdentifier,
       // messages:
       messages
@@ -2888,6 +2898,7 @@ var openaiResponsesModelIds = [
 var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazyValidator)(
   () => (0, import_provider_utils22.zodSchema)(
     import_v417.z.object({
+      conversation: import_v417.z.string().nullish(),
       include: import_v417.z.array(
         import_v417.z.enum([
           "reasoning.encrypted_content",
@@ -2920,6 +2931,15 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazyValid
       parallelToolCalls: import_v417.z.boolean().nullish(),
       previousResponseId: import_v417.z.string().nullish(),
       promptCacheKey: import_v417.z.string().nullish(),
+      /**
+       * The retention policy for the prompt cache.
+       * - 'in_memory': Default. Standard prompt caching behavior.
+       * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+       *   Currently only available for 5.1 series models.
+       *
+       * @default 'in_memory'
+       */
+      promptCacheRetention: import_v417.z.enum(["in_memory", "24h"]).nullish(),
       reasoningEffort: import_v417.z.string().nullish(),
       reasoningSummary: import_v417.z.string().nullish(),
       safetyIdentifier: import_v417.z.string().nullish(),
@@ -3131,6 +3151,13 @@ var OpenAIResponsesLanguageModel = class {
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
+    if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "conversation",
+        details: "conversation and previousResponseId cannot be used together"
+      });
+    }
     const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
       systemMessageMode: modelConfig.systemMessageMode,
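
The guard above does not throw when both continuation mechanisms are supplied; it records an `unsupported-setting` warning and proceeds. A sketch of how that might surface to the caller, assuming warnings are read from the standard `warnings` field on the AI SDK result (both IDs below are hypothetical placeholders):

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Sketch: conversation and previousResponseId are mutually exclusive,
// so this call should produce an 'unsupported-setting' warning.
// Callers should pick exactly one of the two continuation mechanisms.
const result = await generateText({
  model: openai.responses('gpt-5.1'),
  prompt: 'Continue.',
  providerOptions: {
    openai: {
      conversation: 'conv_123',       // placeholder ID
      previousResponseId: 'resp_456', // placeholder ID
    },
  },
});

console.log(result.warnings);
// Expected to include the warning pushed in the code above.
```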
@@ -3193,6 +3220,7 @@ var OpenAIResponsesLanguageModel = class {
         }
       },
       // provider options:
+      conversation: openaiOptions == null ? void 0 : openaiOptions.conversation,
       max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
       metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
       parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
@@ -3203,6 +3231,7 @@ var OpenAIResponsesLanguageModel = class {
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
       include,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
@@ -4464,7 +4493,7 @@ var OpenAITranscriptionModel = class {
 };
 
 // src/version.ts
-var VERSION = true ? "2.0.66" : "0.0.0-test";
+var VERSION = true ? "2.0.68" : "0.0.0-test";
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {