@ai-sdk/openai 3.0.0-beta.102 → 3.0.0-beta.105

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
@@ -19,6 +19,8 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<
  promptCacheKey?: string | undefined;
  promptCacheRetention?: "in_memory" | "24h" | undefined;
  safetyIdentifier?: string | undefined;
+ systemMessageMode?: "remove" | "system" | "developer" | undefined;
+ forceReasoning?: boolean | undefined;
  }>;
  type OpenAIChatLanguageModelOptions = InferSchema<typeof openaiChatLanguageModelOptions>;
@@ -115,7 +117,7 @@ declare class OpenAIEmbeddingModel implements EmbeddingModelV3 {
  doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV3['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV3['doEmbed']>>>;
  }

- type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | (string & {});
+ type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | 'gpt-image-1.5' | (string & {});
  declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
  declare const hasDefaultResponseFormat: Set<string>;
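The new 'gpt-image-1.5' id joins the image model union; the implementation changes further down register it in modelMaxImagesPerCall (10 per call) and hasDefaultResponseFormat. A minimal usage sketch, assuming the AI SDK's experimental_generateImage helper (its AI SDK 5 name; the beta paired with this provider release may export it differently):

```ts
// Sketch: generating with the newly added 'gpt-image-1.5' model id.
// experimental_generateImage is the AI SDK 5 helper name (an assumption
// for the SDK beta that pairs with this provider release).
import { openai } from '@ai-sdk/openai';
import { experimental_generateImage as generateImage } from 'ai';

const { images } = await generateImage({
  model: openai.image('gpt-image-1.5'),
  prompt: 'A watercolor lighthouse at dusk',
  n: 2, // modelMaxImagesPerCall caps a single call at 10 for this model
});
```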
@@ -131,7 +133,7 @@ declare class OpenAIImageModel implements ImageModelV3 {
  get maxImagesPerCall(): number;
  get provider(): string;
  constructor(modelId: OpenAIImageModelId, config: OpenAIImageModelConfig);
- doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
+ doGenerate({ prompt, files, mask, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
  }

  type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});

@@ -525,7 +527,7 @@ declare const fileSearch: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSc
  }>;

  declare const imageGenerationArgsSchema: _ai_sdk_provider_utils.LazySchema<{
- background?: "auto" | "opaque" | "transparent" | undefined;
+ background?: "auto" | "transparent" | "opaque" | undefined;
  inputFidelity?: "low" | "high" | undefined;
  inputImageMask?: {
  fileId?: string | undefined;
@@ -83,7 +83,7 @@ var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorRespo
  function getOpenAILanguageModelCapabilities(modelId) {
  const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
  const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
- const isReasoningModel = !(modelId.startsWith("gpt-3") || modelId.startsWith("gpt-4") || modelId.startsWith("chatgpt-4o") || modelId.startsWith("gpt-5-chat"));
+ const isReasoningModel = modelId.startsWith("o1") || modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("codex-mini") || modelId.startsWith("computer-use-preview") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
  const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1") || modelId.startsWith("gpt-5.2");
  const systemMessageMode = isReasoningModel ? "developer" : "system";
  return {
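Reasoning-model detection flips here from a denylist (everything except gpt-3, gpt-4, chatgpt-4o, and gpt-5-chat counted as reasoning) to an explicit allowlist, so unrecognized ids — for example models served through a custom baseURL — are no longer treated as reasoning models by default. The new forceReasoning option is the escape hatch. A sketch under assumed names (the gateway URL and model id are hypothetical):

```ts
// Sketch (hypothetical gateway + model id): an OpenAI-compatible endpoint
// serving a reasoning model whose id the new allowlist does not recognize.
// forceReasoning restores reasoning-model parameter handling and defaults
// systemMessageMode to 'developer'.
import { createOpenAI } from '@ai-sdk/openai';
import { generateText } from 'ai';

const gateway = createOpenAI({
  baseURL: 'https://gateway.example.com/v1', // hypothetical
  apiKey: process.env.GATEWAY_API_KEY,
});

const { text } = await generateText({
  model: gateway.chat('stealth-reasoner-preview'), // hypothetical id
  prompt: 'Prove that 17 is prime.',
  providerOptions: {
    openai: { forceReasoning: true },
  },
});
```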
@@ -583,7 +583,26 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazySchema)(
  * username or email address, in order to avoid sending us any identifying
  * information.
  */
- safetyIdentifier: import_v43.z.string().optional()
+ safetyIdentifier: import_v43.z.string().optional(),
+ /**
+ * Override the system message mode for this model.
+ * - 'system': Use the 'system' role for system messages (default for most models)
+ * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+ * - 'remove': Remove system messages entirely
+ *
+ * If not specified, the mode is automatically determined based on the model.
+ */
+ systemMessageMode: import_v43.z.enum(["system", "developer", "remove"]).optional(),
+ /**
+ * Force treating this model as a reasoning model.
+ *
+ * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+ * where the model ID is not recognized by the SDK's allowlist.
+ *
+ * When enabled, the SDK applies reasoning-model parameter compatibility rules
+ * and defaults `systemMessageMode` to `developer` unless overridden.
+ */
+ forceReasoning: import_v43.z.boolean().optional()
  })
  )
  );
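The doc comment above describes the override; a minimal sketch of setting it per call through providerOptions, assuming the stock provider and the ai package's generateText (model id illustrative):

```ts
// Sketch: force the plain 'system' role on a model that would otherwise
// default to 'developer' as a reasoning model; 'remove' drops system
// messages entirely.
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { text } = await generateText({
  model: openai.chat('gpt-5'),
  system: 'You are a terse assistant.',
  prompt: 'Summarize the change log.',
  providerOptions: {
    openai: { systemMessageMode: 'system' }, // or 'developer' | 'remove'
  },
});
```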
@@ -678,7 +697,7 @@ var OpenAIChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b, _c;
+ var _a, _b, _c, _d, _e;
  const warnings = [];
  const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
  provider: "openai",

@@ -686,17 +705,18 @@ var OpenAIChatLanguageModel = class {
  schema: openaiChatLanguageModelOptions
  })) != null ? _a : {};
  const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
+ const isReasoningModel = (_b = openaiOptions.forceReasoning) != null ? _b : modelCapabilities.isReasoningModel;
  if (topK != null) {
  warnings.push({ type: "unsupported", feature: "topK" });
  }
  const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
  {
  prompt,
- systemMessageMode: modelCapabilities.systemMessageMode
+ systemMessageMode: (_c = openaiOptions.systemMessageMode) != null ? _c : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode
  }
  );
  warnings.push(...messageWarnings);
- const strictJsonSchema = (_b = openaiOptions.strictJsonSchema) != null ? _b : true;
+ const strictJsonSchema = (_d = openaiOptions.strictJsonSchema) != null ? _d : true;
  const baseArgs = {
  // model id:
  model: this.modelId,

@@ -717,7 +737,7 @@ var OpenAIChatLanguageModel = class {
  json_schema: {
  schema: responseFormat.schema,
  strict: strictJsonSchema,
- name: (_c = responseFormat.name) != null ? _c : "response",
+ name: (_e = responseFormat.name) != null ? _e : "response",
  description: responseFormat.description
  }
  } : { type: "json_object" } : void 0,

@@ -738,7 +758,7 @@ var OpenAIChatLanguageModel = class {
  // messages:
  messages
  };
- if (modelCapabilities.isReasoningModel) {
+ if (isReasoningModel) {
  if (openaiOptions.reasoningEffort !== "none" || !modelCapabilities.supportsNonReasoningParameters) {
  if (baseArgs.temperature != null) {
  baseArgs.temperature = void 0;
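Per the branch above, a model treated as reasoning (natively or via forceReasoning) has sampling parameters such as temperature unset before the request, unless reasoningEffort is 'none' on a model that supports non-reasoning parameters (gpt-5.1/gpt-5.2). A hedged sketch of the observable behavior:

```ts
// Sketch: sampling parameters sent to a reasoning model are stripped before
// the request. Here temperature is removed; inspect result.warnings to see
// what the SDK dropped (the exact warning shape may vary across betas).
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  model: openai.chat('o3-mini'),
  prompt: 'Outline a three-step refactor plan.',
  temperature: 0.7, // unset by the branch above for reasoning models
});
console.log(result.warnings);
```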
@@ -1733,11 +1753,13 @@ var modelMaxImagesPerCall = {
  "dall-e-3": 1,
  "dall-e-2": 10,
  "gpt-image-1": 10,
- "gpt-image-1-mini": 10
+ "gpt-image-1-mini": 10,
+ "gpt-image-1.5": 10
  };
  var hasDefaultResponseFormat = /* @__PURE__ */ new Set([
  "gpt-image-1",
- "gpt-image-1-mini"
+ "gpt-image-1-mini",
+ "gpt-image-1.5"
  ]);

  // src/image/openai-image-model.ts

@@ -1756,6 +1778,8 @@ var OpenAIImageModel = class {
  }
  async doGenerate({
  prompt,
+ files,
+ mask,
  n,
  size,
  aspectRatio,

@@ -1764,7 +1788,7 @@ var OpenAIImageModel = class {
  headers,
  abortSignal
  }) {
- var _a, _b, _c, _d, _e, _f, _g;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
  const warnings = [];
  if (aspectRatio != null) {
  warnings.push({

@@ -1777,6 +1801,72 @@ var OpenAIImageModel = class {
  warnings.push({ type: "unsupported", feature: "seed" });
  }
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ if (files != null) {
+ const { value: response2, responseHeaders: responseHeaders2 } = await (0, import_provider_utils13.postFormDataToApi)({
+ url: this.config.url({
+ path: "/images/edits",
+ modelId: this.modelId
+ }),
+ headers: (0, import_provider_utils13.combineHeaders)(this.config.headers(), headers),
+ formData: (0, import_provider_utils13.convertToFormData)({
+ model: this.modelId,
+ prompt,
+ image: await Promise.all(
+ files.map(
+ (file) => file.type === "file" ? new Blob(
+ [
+ file.data instanceof Uint8Array ? new Blob([file.data], {
+ type: file.mediaType
+ }) : new Blob([(0, import_provider_utils13.convertBase64ToUint8Array)(file.data)], {
+ type: file.mediaType
+ })
+ ],
+ { type: file.mediaType }
+ ) : (0, import_provider_utils13.downloadBlob)(file.url)
+ )
+ ),
+ mask: mask != null ? await fileToBlob(mask) : void 0,
+ n,
+ size,
+ ...(_d = providerOptions.openai) != null ? _d : {}
+ }),
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: (0, import_provider_utils13.createJsonResponseHandler)(
+ openaiImageResponseSchema
+ ),
+ abortSignal,
+ fetch: this.config.fetch
+ });
+ return {
+ images: response2.data.map((item) => item.b64_json),
+ warnings,
+ usage: response2.usage != null ? {
+ inputTokens: (_e = response2.usage.input_tokens) != null ? _e : void 0,
+ outputTokens: (_f = response2.usage.output_tokens) != null ? _f : void 0,
+ totalTokens: (_g = response2.usage.total_tokens) != null ? _g : void 0
+ } : void 0,
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders2
+ },
+ providerMetadata: {
+ openai: {
+ images: response2.data.map((item) => {
+ var _a2, _b2, _c2, _d2, _e2;
+ return {
+ ...item.revised_prompt ? { revisedPrompt: item.revised_prompt } : {},
+ created: (_a2 = response2.created) != null ? _a2 : void 0,
+ size: (_b2 = response2.size) != null ? _b2 : void 0,
+ quality: (_c2 = response2.quality) != null ? _c2 : void 0,
+ background: (_d2 = response2.background) != null ? _d2 : void 0,
+ outputFormat: (_e2 = response2.output_format) != null ? _e2 : void 0
+ };
+ })
+ }
+ }
+ };
+ }
  const { value: response, responseHeaders } = await (0, import_provider_utils13.postJsonToApi)({
  url: this.config.url({
  path: "/images/generations",
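When files is present, doGenerate now posts multipart form data to /images/edits, mapping each ImageModelV3 file part ({ type: 'file', data, mediaType } with Uint8Array or base64 data, or { type: 'url', url }) to a Blob, with mask converted by the fileToBlob helper added below. A sketch at the doGenerate layer, since this diff does not show the higher-level helper's edit parameters in the paired SDK beta:

```ts
// Sketch (assumed shapes, mirroring the branch above): editing an image by
// calling the image model's doGenerate directly with files + mask parts.
import { readFileSync } from 'node:fs';
import { openai } from '@ai-sdk/openai';

const model = openai.image('gpt-image-1');

const result = await model.doGenerate({
  prompt: 'Replace the sky with a thunderstorm',
  files: [
    { type: 'file', data: new Uint8Array(readFileSync('photo.png')), mediaType: 'image/png' },
  ],
  // mask may also be { type: 'url', url: '...' }; fileToBlob handles both.
  mask: { type: 'file', data: new Uint8Array(readFileSync('mask.png')), mediaType: 'image/png' },
  n: 1,
  size: '1024x1024',
  aspectRatio: undefined,
  seed: undefined,
  providerOptions: {},
  headers: undefined,
  abortSignal: undefined,
});
// result.images holds base64-encoded edited images (b64_json per the code above).
```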
@@ -1788,7 +1878,7 @@ var OpenAIImageModel = class {
  prompt,
  n,
  size,
- ...(_d = providerOptions.openai) != null ? _d : {},
+ ...(_h = providerOptions.openai) != null ? _h : {},
  ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
  },
  failedResponseHandler: openaiFailedResponseHandler,

@@ -1802,9 +1892,9 @@ var OpenAIImageModel = class {
  images: response.data.map((item) => item.b64_json),
  warnings,
  usage: response.usage != null ? {
- inputTokens: (_e = response.usage.input_tokens) != null ? _e : void 0,
- outputTokens: (_f = response.usage.output_tokens) != null ? _f : void 0,
- totalTokens: (_g = response.usage.total_tokens) != null ? _g : void 0
+ inputTokens: (_i = response.usage.input_tokens) != null ? _i : void 0,
+ outputTokens: (_j = response.usage.output_tokens) != null ? _j : void 0,
+ totalTokens: (_k = response.usage.total_tokens) != null ? _k : void 0
  } : void 0,
  response: {
  timestamp: currentDate,

@@ -1829,6 +1919,14 @@ var OpenAIImageModel = class {
  };
  }
  };
+ async function fileToBlob(file) {
+ if (!file) return void 0;
+ if (file.type === "url") {
+ return (0, import_provider_utils13.downloadBlob)(file.url);
+ }
+ const data = file.data instanceof Uint8Array ? file.data : (0, import_provider_utils13.convertBase64ToUint8Array)(file.data);
+ return new Blob([data], { type: file.mediaType });
+ }

  // src/transcription/openai-transcription-model.ts
  var import_provider_utils16 = require("@ai-sdk/provider-utils");
@@ -3578,7 +3676,26 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils24.lazySchem
  * Defaults to `undefined`.
  * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
  */
- user: import_v417.z.string().nullish()
+ user: import_v417.z.string().nullish(),
+ /**
+ * Override the system message mode for this model.
+ * - 'system': Use the 'system' role for system messages (default for most models)
+ * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+ * - 'remove': Remove system messages entirely
+ *
+ * If not specified, the mode is automatically determined based on the model.
+ */
+ systemMessageMode: import_v417.z.enum(["system", "developer", "remove"]).optional(),
+ /**
+ * Force treating this model as a reasoning model.
+ *
+ * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+ * where the model ID is not recognized by the SDK's allowlist.
+ *
+ * When enabled, the SDK applies reasoning-model parameter compatibility rules
+ * and defaults `systemMessageMode` to `developer` unless overridden.
+ */
+ forceReasoning: import_v417.z.boolean().optional()
  })
  )
  );
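The same systemMessageMode and forceReasoning options are added to the Responses API schema, so the overrides apply whichever API a model id resolves to. A sketch using the provider's responses factory (model id illustrative):

```ts
// Sketch: the same overrides through the Responses API model factory.
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { text } = await generateText({
  model: openai.responses('gpt-5'),
  system: 'Answer in one sentence.',
  prompt: 'What changed in this release?',
  providerOptions: {
    openai: {
      systemMessageMode: 'developer', // explicit override
      // forceReasoning: true,        // for unrecognized reasoning model ids
    },
  },
});
```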
@@ -4105,7 +4222,7 @@ var OpenAIResponsesLanguageModel = class {
  toolChoice,
  responseFormat
  }) {
- var _a, _b, _c, _d;
+ var _a, _b, _c, _d, _e, _f;
  const warnings = [];
  const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
  if (topK != null) {

@@ -4128,6 +4245,7 @@ var OpenAIResponsesLanguageModel = class {
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
  });
+ const isReasoningModel = (_a = openaiOptions == null ? void 0 : openaiOptions.forceReasoning) != null ? _a : modelCapabilities.isReasoningModel;
  if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
  warnings.push({
  type: "unsupported",

@@ -4152,15 +4270,15 @@ var OpenAIResponsesLanguageModel = class {
  const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
  prompt,
  toolNameMapping,
- systemMessageMode: modelCapabilities.systemMessageMode,
+ systemMessageMode: (_b = openaiOptions == null ? void 0 : openaiOptions.systemMessageMode) != null ? _b : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode,
  fileIdPrefixes: this.config.fileIdPrefixes,
- store: (_a = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _a : true,
+ store: (_c = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _c : true,
  hasLocalShellTool: hasOpenAITool("openai.local_shell"),
  hasShellTool: hasOpenAITool("openai.shell"),
  hasApplyPatchTool: hasOpenAITool("openai.apply_patch")
  });
  warnings.push(...inputWarnings);
- const strictJsonSchema = (_b = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _b : true;
+ const strictJsonSchema = (_d = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _d : true;
  let include = openaiOptions == null ? void 0 : openaiOptions.include;
  function addInclude(key) {
  if (include == null) {

@@ -4176,9 +4294,9 @@ var OpenAIResponsesLanguageModel = class {
  if (topLogprobs) {
  addInclude("message.output_text.logprobs");
  }
- const webSearchToolName = (_c = tools == null ? void 0 : tools.find(
+ const webSearchToolName = (_e = tools == null ? void 0 : tools.find(
  (tool) => tool.type === "provider" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
- )) == null ? void 0 : _c.name;
+ )) == null ? void 0 : _e.name;
  if (webSearchToolName) {
  addInclude("web_search_call.action.sources");
  }

@@ -4186,7 +4304,7 @@ var OpenAIResponsesLanguageModel = class {
  addInclude("code_interpreter_call.outputs");
  }
  const store = openaiOptions == null ? void 0 : openaiOptions.store;
- if (store === false && modelCapabilities.isReasoningModel) {
+ if (store === false && isReasoningModel) {
  addInclude("reasoning.encrypted_content");
  }
  const baseArgs = {

@@ -4201,7 +4319,7 @@ var OpenAIResponsesLanguageModel = class {
  format: responseFormat.schema != null ? {
  type: "json_schema",
  strict: strictJsonSchema,
- name: (_d = responseFormat.name) != null ? _d : "response",
+ name: (_f = responseFormat.name) != null ? _f : "response",
  description: responseFormat.description,
  schema: responseFormat.schema
  } : { type: "json_object" }

@@ -4228,7 +4346,7 @@ var OpenAIResponsesLanguageModel = class {
  top_logprobs: topLogprobs,
  truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
  // model-specific settings:
- ...modelCapabilities.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+ ...isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
  reasoning: {
  ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
  effort: openaiOptions.reasoningEffort

@@ -4239,7 +4357,7 @@ var OpenAIResponsesLanguageModel = class {
  }
  }
  };
- if (modelCapabilities.isReasoningModel) {
+ if (isReasoningModel) {
  if (!((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) === "none" && modelCapabilities.supportsNonReasoningParameters)) {
  if (baseArgs.temperature != null) {
  baseArgs.temperature = void 0;