@ai-sdk/openai 3.0.0-beta.24 → 3.0.0-beta.26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,9 +1,9 @@
  import { LanguageModelV3, EmbeddingModelV3, ImageModelV3, TranscriptionModelV3CallOptions, TranscriptionModelV3, SpeechModelV3 } from '@ai-sdk/provider';
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
- import { InferValidator, FetchFunction } from '@ai-sdk/provider-utils';
+ import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';

  type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
- declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazyValidator<{
+ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<{
  logitBias?: Record<number, number> | undefined;
  logprobs?: number | boolean | undefined;
  parallelToolCalls?: boolean | undefined;
@@ -20,7 +20,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazyValidat
  promptCacheKey?: string | undefined;
  safetyIdentifier?: string | undefined;
  }>;
- type OpenAIChatLanguageModelOptions = InferValidator<typeof openaiChatLanguageModelOptions>;
+ type OpenAIChatLanguageModelOptions = InferSchema<typeof openaiChatLanguageModelOptions>;

  type OpenAIChatConfig = {
  provider: string;
@@ -46,14 +46,14 @@ declare class OpenAIChatLanguageModel implements LanguageModelV3 {
  }

  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
- declare const openaiCompletionProviderOptions: _ai_sdk_provider_utils.LazyValidator<{
+ declare const openaiCompletionProviderOptions: _ai_sdk_provider_utils.LazySchema<{
  echo?: boolean | undefined;
  logitBias?: Record<string, number> | undefined;
  suffix?: string | undefined;
  user?: string | undefined;
  logprobs?: number | boolean | undefined;
  }>;
- type OpenAICompletionProviderOptions = InferValidator<typeof openaiCompletionProviderOptions>;
+ type OpenAICompletionProviderOptions = InferSchema<typeof openaiCompletionProviderOptions>;

  type OpenAICompletionConfig = {
  provider: string;
@@ -98,11 +98,11 @@ type OpenAIConfig = {
  };

  type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
- declare const openaiEmbeddingProviderOptions: _ai_sdk_provider_utils.LazyValidator<{
+ declare const openaiEmbeddingProviderOptions: _ai_sdk_provider_utils.LazySchema<{
  dimensions?: number | undefined;
  user?: string | undefined;
  }>;
- type OpenAIEmbeddingProviderOptions = InferValidator<typeof openaiEmbeddingProviderOptions>;
+ type OpenAIEmbeddingProviderOptions = InferSchema<typeof openaiEmbeddingProviderOptions>;

  declare class OpenAIEmbeddingModel implements EmbeddingModelV3<string> {
  readonly specificationVersion = "v3";
@@ -135,14 +135,14 @@ declare class OpenAIImageModel implements ImageModelV3 {
  }

  type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
- declare const openAITranscriptionProviderOptions: _ai_sdk_provider_utils.LazyValidator<{
+ declare const openAITranscriptionProviderOptions: _ai_sdk_provider_utils.LazySchema<{
  include?: string[] | undefined;
  language?: string | undefined;
  prompt?: string | undefined;
  temperature?: number | undefined;
  timestampGranularities?: ("word" | "segment")[] | undefined;
  }>;
- type OpenAITranscriptionProviderOptions = InferValidator<typeof openAITranscriptionProviderOptions>;
+ type OpenAITranscriptionProviderOptions = InferSchema<typeof openAITranscriptionProviderOptions>;

  type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV3CallOptions, 'providerOptions'> & {
  providerOptions?: {
@@ -165,11 +165,11 @@ declare class OpenAITranscriptionModel implements TranscriptionModelV3 {
  }

  type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
- declare const openaiSpeechProviderOptionsSchema: _ai_sdk_provider_utils.LazyValidator<{
+ declare const openaiSpeechProviderOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
  instructions?: string | null | undefined;
  speed?: number | null | undefined;
  }>;
- type OpenAISpeechCallOptions = InferValidator<typeof openaiSpeechProviderOptionsSchema>;
+ type OpenAISpeechCallOptions = InferSchema<typeof openaiSpeechProviderOptionsSchema>;

  interface OpenAISpeechModelConfig extends OpenAIConfig {
  _internal?: {
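
Note on the type-level change above: @ai-sdk/provider-utils renamed its lazy-schema helpers, so LazyValidator/InferValidator become LazySchema/InferSchema and lazyValidator becomes lazySchema (visible again in the bundled source below). For downstream code the update is mechanical; a minimal sketch of the new names, assuming your project pins @ai-sdk/provider-utils to the beta that exports InferSchema:

import { lazySchema, zodSchema, type InferSchema } from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';

// Same construction pattern the provider uses: build the zod schema lazily.
const myProviderOptions = lazySchema(() =>
  zodSchema(
    z.object({
      user: z.string().optional(),
      logprobs: z.union([z.number(), z.boolean()]).optional(),
    }),
  ),
);

// Previously: InferValidator<typeof myProviderOptions>
type MyProviderOptions = InferSchema<typeof myProviderOptions>;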
@@ -286,7 +286,7 @@ function mapOpenAIFinishReason(finishReason) {
  // src/chat/openai-chat-api.ts
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
  var import_v42 = require("zod/v4");
- var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
+ var openaiChatResponseSchema = (0, import_provider_utils3.lazySchema)(
  () => (0, import_provider_utils3.zodSchema)(
  import_v42.z.object({
  id: import_v42.z.string().nullish(),
@@ -351,7 +351,7 @@ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
  })
  )
  );
- var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
+ var openaiChatChunkSchema = (0, import_provider_utils3.lazySchema)(
  () => (0, import_provider_utils3.zodSchema)(
  import_v42.z.union([
  import_v42.z.object({
@@ -424,7 +424,7 @@ var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
  // src/chat/openai-chat-options.ts
  var import_provider_utils4 = require("@ai-sdk/provider-utils");
  var import_v43 = require("zod/v4");
- var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazyValidator)(
+ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazySchema)(
  () => (0, import_provider_utils4.zodSchema)(
  import_v43.z.object({
  /**
@@ -1232,7 +1232,7 @@ function mapOpenAIFinishReason2(finishReason) {
  // src/completion/openai-completion-api.ts
  var import_v44 = require("zod/v4");
  var import_provider_utils6 = require("@ai-sdk/provider-utils");
- var openaiCompletionResponseSchema = (0, import_provider_utils6.lazyValidator)(
+ var openaiCompletionResponseSchema = (0, import_provider_utils6.lazySchema)(
  () => (0, import_provider_utils6.zodSchema)(
  import_v44.z.object({
  id: import_v44.z.string().nullish(),
@@ -1257,7 +1257,7 @@ var openaiCompletionResponseSchema = (0, import_provider_utils6.lazyValidator)(
  })
  )
  );
- var openaiCompletionChunkSchema = (0, import_provider_utils6.lazyValidator)(
+ var openaiCompletionChunkSchema = (0, import_provider_utils6.lazySchema)(
  () => (0, import_provider_utils6.zodSchema)(
  import_v44.z.union([
  import_v44.z.object({
@@ -1290,7 +1290,7 @@ var openaiCompletionChunkSchema = (0, import_provider_utils6.lazyValidator)(
  // src/completion/openai-completion-options.ts
  var import_provider_utils7 = require("@ai-sdk/provider-utils");
  var import_v45 = require("zod/v4");
- var openaiCompletionProviderOptions = (0, import_provider_utils7.lazyValidator)(
+ var openaiCompletionProviderOptions = (0, import_provider_utils7.lazySchema)(
  () => (0, import_provider_utils7.zodSchema)(
  import_v45.z.object({
  /**
@@ -1572,7 +1572,7 @@ var import_provider_utils11 = require("@ai-sdk/provider-utils");
  // src/embedding/openai-embedding-options.ts
  var import_provider_utils9 = require("@ai-sdk/provider-utils");
  var import_v46 = require("zod/v4");
- var openaiEmbeddingProviderOptions = (0, import_provider_utils9.lazyValidator)(
+ var openaiEmbeddingProviderOptions = (0, import_provider_utils9.lazySchema)(
  () => (0, import_provider_utils9.zodSchema)(
  import_v46.z.object({
  /**
@@ -1592,7 +1592,7 @@ var openaiEmbeddingProviderOptions = (0, import_provider_utils9.lazyValidator)(
  // src/embedding/openai-embedding-api.ts
  var import_provider_utils10 = require("@ai-sdk/provider-utils");
  var import_v47 = require("zod/v4");
- var openaiTextEmbeddingResponseSchema = (0, import_provider_utils10.lazyValidator)(
+ var openaiTextEmbeddingResponseSchema = (0, import_provider_utils10.lazySchema)(
  () => (0, import_provider_utils10.zodSchema)(
  import_v47.z.object({
  data: import_v47.z.array(import_v47.z.object({ embedding: import_v47.z.array(import_v47.z.number()) })),
@@ -1671,7 +1671,7 @@ var import_provider_utils13 = require("@ai-sdk/provider-utils");
  // src/image/openai-image-api.ts
  var import_provider_utils12 = require("@ai-sdk/provider-utils");
  var import_v48 = require("zod/v4");
- var openaiImageResponseSchema = (0, import_provider_utils12.lazyValidator)(
+ var openaiImageResponseSchema = (0, import_provider_utils12.lazySchema)(
  () => (0, import_provider_utils12.zodSchema)(
  import_v48.z.object({
  data: import_v48.z.array(
@@ -1781,7 +1781,7 @@ var import_provider_utils16 = require("@ai-sdk/provider-utils");
  // src/transcription/openai-transcription-api.ts
  var import_provider_utils14 = require("@ai-sdk/provider-utils");
  var import_v49 = require("zod/v4");
- var openaiTranscriptionResponseSchema = (0, import_provider_utils14.lazyValidator)(
+ var openaiTranscriptionResponseSchema = (0, import_provider_utils14.lazySchema)(
  () => (0, import_provider_utils14.zodSchema)(
  import_v49.z.object({
  text: import_v49.z.string(),
@@ -1815,7 +1815,7 @@ var openaiTranscriptionResponseSchema = (0, import_provider_utils14.lazyValidato
  // src/transcription/openai-transcription-options.ts
  var import_provider_utils15 = require("@ai-sdk/provider-utils");
  var import_v410 = require("zod/v4");
- var openAITranscriptionProviderOptions = (0, import_provider_utils15.lazyValidator)(
+ var openAITranscriptionProviderOptions = (0, import_provider_utils15.lazySchema)(
  () => (0, import_provider_utils15.zodSchema)(
  import_v410.z.object({
  /**
@@ -2017,7 +2017,7 @@ var import_provider_utils18 = require("@ai-sdk/provider-utils");
  // src/speech/openai-speech-options.ts
  var import_provider_utils17 = require("@ai-sdk/provider-utils");
  var import_v411 = require("zod/v4");
- var openaiSpeechProviderOptionsSchema = (0, import_provider_utils17.lazyValidator)(
+ var openaiSpeechProviderOptionsSchema = (0, import_provider_utils17.lazySchema)(
  () => (0, import_provider_utils17.zodSchema)(
  import_v411.z.object({
  instructions: import_v411.z.string().nullish(),
@@ -2454,7 +2454,7 @@ function mapOpenAIResponseFinishReason({
  // src/responses/openai-responses-api.ts
  var import_provider_utils21 = require("@ai-sdk/provider-utils");
  var import_v414 = require("zod/v4");
- var openaiResponsesChunkSchema = (0, import_provider_utils21.lazyValidator)(
+ var openaiResponsesChunkSchema = (0, import_provider_utils21.lazySchema)(
  () => (0, import_provider_utils21.zodSchema)(
  import_v414.z.union([
  import_v414.z.object({
@@ -2714,7 +2714,7 @@ var openaiResponsesChunkSchema = (0, import_provider_utils21.lazyValidator)(
  ])
  )
  );
- var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
+ var openaiResponsesResponseSchema = (0, import_provider_utils21.lazySchema)(
  () => (0, import_provider_utils21.zodSchema)(
  import_v414.z.object({
  id: import_v414.z.string(),
@@ -2934,12 +2934,13 @@ var openaiResponsesModelIds = [
  "gpt-5-chat-latest",
  ...openaiResponsesReasoningModelIds
  ];
- var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazyValidator)(
+ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazySchema)(
  () => (0, import_provider_utils22.zodSchema)(
  import_v415.z.object({
  include: import_v415.z.array(
  import_v415.z.enum([
  "reasoning.encrypted_content",
+ // handled internally by default, only needed for unknown reasoning models
  "file_search_call.results",
  "message.output_text.logprobs"
  ])
@@ -3438,7 +3439,11 @@ var OpenAIResponsesLanguageModel = class {
  const strictJsonSchema = (_b = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _b : false;
  let include = openaiOptions == null ? void 0 : openaiOptions.include;
  function addInclude(key) {
- include = include != null ? [...include, key] : [key];
+ if (include == null) {
+ include = [key];
+ } else if (!include.includes(key)) {
+ include = [...include, key];
+ }
  }
  function hasOpenAITool(id) {
  return (tools == null ? void 0 : tools.find(
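
The addInclude helper in the Responses model is also hardened: it used to append the key unconditionally, so a value could end up in include twice (for example when the caller already passed it via providerOptions and a tool check added it again); now it only appends keys that are not already present. A rough equivalent of the new logic, rewritten here as a pure function for illustration (the name addIncludeOnce is not from the package):

function addIncludeOnce(include: string[] | undefined, key: string): string[] {
  // beta.26 behavior: create the array if needed, skip keys that are already included
  if (include == null) return [key];
  return include.includes(key) ? include : [...include, key];
}

// e.g. the caller already requested logprobs; adding it again is now a no-op:
addIncludeOnce(['message.output_text.logprobs'], 'message.output_text.logprobs');
// -> ['message.output_text.logprobs']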
@@ -3458,6 +3463,10 @@ var OpenAIResponsesLanguageModel = class {
  if (hasOpenAITool("openai.code_interpreter")) {
  addInclude("code_interpreter_call.outputs");
  }
+ const store = openaiOptions == null ? void 0 : openaiOptions.store;
+ if (store === false && modelConfig.isReasoningModel) {
+ addInclude("reasoning.encrypted_content");
+ }
  const baseArgs = {
  model: this.modelId,
  input,
@@ -3485,7 +3494,7 @@ var OpenAIResponsesLanguageModel = class {
  metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
  parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
  previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
- store: openaiOptions == null ? void 0 : openaiOptions.store,
+ store,
  user: openaiOptions == null ? void 0 : openaiOptions.user,
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
  service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
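
Behaviorally, the last two hunks mean the Responses model now reads store once and, when store is explicitly set to false on a reasoning model, automatically adds "reasoning.encrypted_content" to the include list so reasoning content can round-trip without server-side storage (which is what the new enum comment above refers to). A hedged usage sketch — the generateText call, the responses() factory, and the model id are assumptions about the surrounding AI SDK beta; only the store/include handling comes from this diff:

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  // assumed reasoning-capable Responses API model id
  model: openai.responses('gpt-5'),
  prompt: 'Summarize the trade-offs of stateless reasoning.',
  providerOptions: {
    openai: {
      // with beta.26, store: false on a reasoning model implicitly requests
      // 'reasoning.encrypted_content'; adding it to include manually is no longer needed
      store: false,
    },
  },
});

console.log(result.text);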