@ai-sdk/openai 3.0.0-beta.104 → 3.0.0-beta.105
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -0
- package/dist/index.d.mts +4 -0
- package/dist/index.d.ts +4 -0
- package/dist/index.js +140 -24
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +148 -28
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +4 -2
- package/dist/internal/index.d.ts +4 -2
- package/dist/internal/index.js +139 -23
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +147 -27
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/internal/index.mjs
CHANGED
```diff
@@ -35,7 +35,7 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
 function getOpenAILanguageModelCapabilities(modelId) {
   const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
   const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
-  const isReasoningModel =
+  const isReasoningModel = modelId.startsWith("o1") || modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("codex-mini") || modelId.startsWith("computer-use-preview") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
   const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1") || modelId.startsWith("gpt-5.2");
   const systemMessageMode = isReasoningModel ? "developer" : "system";
   return {
```
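For reference, the updated predicate classifies model IDs as follows. This is a standalone restatement for illustration; the results are derived by reading the expression above, not by running the package:

```ts
// Hypothetical restatement of the bundled isReasoningModel predicate.
const isReasoningModel = (id: string): boolean =>
  id.startsWith("o1") ||
  id.startsWith("o3") ||
  id.startsWith("o4-mini") ||
  id.startsWith("codex-mini") ||
  id.startsWith("computer-use-preview") ||
  (id.startsWith("gpt-5") && !id.startsWith("gpt-5-chat"));

console.log(isReasoningModel("o3-mini")); // true
console.log(isReasoningModel("gpt-5.1")); // true ("gpt-5" prefix matches)
console.log(isReasoningModel("gpt-5-chat-latest")); // false (chat variant excluded)
console.log(isReasoningModel("gpt-4o")); // false
```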
```diff
@@ -537,7 +537,26 @@ var openaiChatLanguageModelOptions = lazySchema2(
      * username or email address, in order to avoid sending us any identifying
      * information.
      */
-    safetyIdentifier: z3.string().optional()
+    safetyIdentifier: z3.string().optional(),
+    /**
+     * Override the system message mode for this model.
+     * - 'system': Use the 'system' role for system messages (default for most models)
+     * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+     * - 'remove': Remove system messages entirely
+     *
+     * If not specified, the mode is automatically determined based on the model.
+     */
+    systemMessageMode: z3.enum(["system", "developer", "remove"]).optional(),
+    /**
+     * Force treating this model as a reasoning model.
+     *
+     * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+     * where the model ID is not recognized by the SDK's allowlist.
+     *
+     * When enabled, the SDK applies reasoning-model parameter compatibility rules
+     * and defaults `systemMessageMode` to `developer` unless overridden.
+     */
+    forceReasoning: z3.boolean().optional()
   })
  )
);
```
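In application code, these new options flow through the AI SDK's `providerOptions` channel. A minimal sketch, assuming the standard `ai` + `@ai-sdk/openai` setup; the base URL and model ID are placeholders for a hypothetical gateway serving a reasoning model under an ID the SDK does not recognize:

```ts
import { createOpenAI } from "@ai-sdk/openai";
import { generateText } from "ai";

// Hypothetical gateway that serves a reasoning model under a custom ID.
const openai = createOpenAI({ baseURL: "https://my-gateway.example/v1" });

const { text } = await generateText({
  model: openai.chat("stealth-reasoner"), // ID not on the SDK's allowlist
  prompt: "Summarize the changelog.",
  providerOptions: {
    openai: {
      forceReasoning: true, // apply reasoning-model parameter rules
      systemMessageMode: "developer", // optional; forceReasoning already defaults to this
    },
  },
});
```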
```diff
@@ -634,7 +653,7 @@ var OpenAIChatLanguageModel = class {
     toolChoice,
     providerOptions
   }) {
-    var _a, _b, _c;
+    var _a, _b, _c, _d, _e;
     const warnings = [];
     const openaiOptions = (_a = await parseProviderOptions({
       provider: "openai",
```
```diff
@@ -642,17 +661,18 @@ var OpenAIChatLanguageModel = class {
       schema: openaiChatLanguageModelOptions
     })) != null ? _a : {};
     const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
+    const isReasoningModel = (_b = openaiOptions.forceReasoning) != null ? _b : modelCapabilities.isReasoningModel;
     if (topK != null) {
       warnings.push({ type: "unsupported", feature: "topK" });
     }
     const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
       {
         prompt,
-        systemMessageMode: modelCapabilities.systemMessageMode
+        systemMessageMode: (_c = openaiOptions.systemMessageMode) != null ? _c : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode
       }
     );
     warnings.push(...messageWarnings);
-    const strictJsonSchema = (
+    const strictJsonSchema = (_d = openaiOptions.strictJsonSchema) != null ? _d : true;
     const baseArgs = {
       // model id:
       model: this.modelId,
```
```diff
@@ -673,7 +693,7 @@ var OpenAIChatLanguageModel = class {
           json_schema: {
             schema: responseFormat.schema,
             strict: strictJsonSchema,
-            name: (
+            name: (_e = responseFormat.name) != null ? _e : "response",
             description: responseFormat.description
           }
         } : { type: "json_object" } : void 0,
```
```diff
@@ -694,7 +714,7 @@ var OpenAIChatLanguageModel = class {
       // messages:
       messages
     };
-    if (
+    if (isReasoningModel) {
       if (openaiOptions.reasoningEffort !== "none" || !modelCapabilities.supportsNonReasoningParameters) {
         if (baseArgs.temperature != null) {
           baseArgs.temperature = void 0;
```
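Read together, the two chat-model hunks above establish a clear precedence: an explicit `systemMessageMode` override wins, then `forceReasoning` (or a recognized reasoning-model ID) forces `developer`, and otherwise the capability default applies; the same resolved flag also gates stripping of unsupported sampling parameters such as `temperature`. Restated as a sketch, with names mirroring the bundled source:

```ts
// Sketch of the resolution order implemented in the hunks above.
const isReasoningModel =
  openaiOptions.forceReasoning ?? modelCapabilities.isReasoningModel;

const systemMessageMode =
  openaiOptions.systemMessageMode ?? // 1. explicit per-call override
  (isReasoningModel ? "developer" : modelCapabilities.systemMessageMode); // 2. derived default
```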
```diff
@@ -1670,7 +1690,11 @@ var OpenAIEmbeddingModel = class {
 // src/image/openai-image-model.ts
 import {
   combineHeaders as combineHeaders4,
+  convertBase64ToUint8Array,
+  convertToFormData,
   createJsonResponseHandler as createJsonResponseHandler4,
+  downloadBlob,
+  postFormDataToApi,
   postJsonToApi as postJsonToApi4
 } from "@ai-sdk/provider-utils";
 
```
```diff
@@ -1734,6 +1758,8 @@ var OpenAIImageModel = class {
   }
   async doGenerate({
     prompt,
+    files,
+    mask,
     n,
     size,
     aspectRatio,
```
```diff
@@ -1742,7 +1768,7 @@ var OpenAIImageModel = class {
     headers,
     abortSignal
   }) {
-    var _a, _b, _c, _d, _e, _f, _g;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
     const warnings = [];
     if (aspectRatio != null) {
       warnings.push({
```
```diff
@@ -1755,6 +1781,72 @@ var OpenAIImageModel = class {
       warnings.push({ type: "unsupported", feature: "seed" });
     }
     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+    if (files != null) {
+      const { value: response2, responseHeaders: responseHeaders2 } = await postFormDataToApi({
+        url: this.config.url({
+          path: "/images/edits",
+          modelId: this.modelId
+        }),
+        headers: combineHeaders4(this.config.headers(), headers),
+        formData: convertToFormData({
+          model: this.modelId,
+          prompt,
+          image: await Promise.all(
+            files.map(
+              (file) => file.type === "file" ? new Blob(
+                [
+                  file.data instanceof Uint8Array ? new Blob([file.data], {
+                    type: file.mediaType
+                  }) : new Blob([convertBase64ToUint8Array(file.data)], {
+                    type: file.mediaType
+                  })
+                ],
+                { type: file.mediaType }
+              ) : downloadBlob(file.url)
+            )
+          ),
+          mask: mask != null ? await fileToBlob(mask) : void 0,
+          n,
+          size,
+          ...(_d = providerOptions.openai) != null ? _d : {}
+        }),
+        failedResponseHandler: openaiFailedResponseHandler,
+        successfulResponseHandler: createJsonResponseHandler4(
+          openaiImageResponseSchema
+        ),
+        abortSignal,
+        fetch: this.config.fetch
+      });
+      return {
+        images: response2.data.map((item) => item.b64_json),
+        warnings,
+        usage: response2.usage != null ? {
+          inputTokens: (_e = response2.usage.input_tokens) != null ? _e : void 0,
+          outputTokens: (_f = response2.usage.output_tokens) != null ? _f : void 0,
+          totalTokens: (_g = response2.usage.total_tokens) != null ? _g : void 0
+        } : void 0,
+        response: {
+          timestamp: currentDate,
+          modelId: this.modelId,
+          headers: responseHeaders2
+        },
+        providerMetadata: {
+          openai: {
+            images: response2.data.map((item) => {
+              var _a2, _b2, _c2, _d2, _e2;
+              return {
+                ...item.revised_prompt ? { revisedPrompt: item.revised_prompt } : {},
+                created: (_a2 = response2.created) != null ? _a2 : void 0,
+                size: (_b2 = response2.size) != null ? _b2 : void 0,
+                quality: (_c2 = response2.quality) != null ? _c2 : void 0,
+                background: (_d2 = response2.background) != null ? _d2 : void 0,
+                outputFormat: (_e2 = response2.output_format) != null ? _e2 : void 0
+              };
+            })
+          }
+        }
+      };
+    }
     const { value: response, responseHeaders } = await postJsonToApi4({
       url: this.config.url({
         path: "/images/generations",
```
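This hunk adds image editing: when `files` is present, the model posts multipart form data to `/images/edits` instead of JSON to `/images/generations`. A sketch of driving this path through the image model's `doGenerate` call; the parameter names and the `{ type: "file" | "url" }` shapes come from the hunk above, but the exact option typing against the beta image-model spec is an assumption:

```ts
import { openai } from "@ai-sdk/openai";
import { readFile } from "node:fs/promises";

const model = openai.image("gpt-image-1"); // model ID is a placeholder

// files/mask entries are either { type: "file", data, mediaType }
// or { type: "url", url }, per the conversion logic above.
// Other spec options (aspectRatio, seed, headers, abortSignal) are
// omitted in this sketch.
const result = await model.doGenerate({
  prompt: "Replace the sky with a dramatic sunset",
  files: [
    {
      type: "file",
      data: new Uint8Array(await readFile("photo.png")),
      mediaType: "image/png",
    },
  ],
  mask: { type: "url", url: new URL("https://example.com/mask.png") },
  n: 1,
  size: "1024x1024",
  providerOptions: {},
});

// Edited images come back as base64 strings, same as the generation path.
console.log(result.images.length);
```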
```diff
@@ -1766,7 +1858,7 @@ var OpenAIImageModel = class {
       prompt,
       n,
       size,
-      ...(
+      ...(_h = providerOptions.openai) != null ? _h : {},
       ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
     },
     failedResponseHandler: openaiFailedResponseHandler,
```
```diff
@@ -1780,9 +1872,9 @@ var OpenAIImageModel = class {
       images: response.data.map((item) => item.b64_json),
       warnings,
       usage: response.usage != null ? {
-        inputTokens: (
-        outputTokens: (
-        totalTokens: (
+        inputTokens: (_i = response.usage.input_tokens) != null ? _i : void 0,
+        outputTokens: (_j = response.usage.output_tokens) != null ? _j : void 0,
+        totalTokens: (_k = response.usage.total_tokens) != null ? _k : void 0
       } : void 0,
       response: {
         timestamp: currentDate,
```
```diff
@@ -1807,15 +1899,23 @@ var OpenAIImageModel = class {
     };
   }
 };
+async function fileToBlob(file) {
+  if (!file) return void 0;
+  if (file.type === "url") {
+    return downloadBlob(file.url);
+  }
+  const data = file.data instanceof Uint8Array ? file.data : convertBase64ToUint8Array(file.data);
+  return new Blob([data], { type: file.mediaType });
+}
 
 // src/transcription/openai-transcription-model.ts
 import {
   combineHeaders as combineHeaders5,
-  convertBase64ToUint8Array,
+  convertBase64ToUint8Array as convertBase64ToUint8Array2,
   createJsonResponseHandler as createJsonResponseHandler5,
   mediaTypeToExtension,
   parseProviderOptions as parseProviderOptions4,
-  postFormDataToApi
+  postFormDataToApi as postFormDataToApi2
 } from "@ai-sdk/provider-utils";
 
 // src/transcription/openai-transcription-api.ts
```
```diff
@@ -1965,7 +2065,7 @@ var OpenAITranscriptionModel = class {
     schema: openAITranscriptionProviderOptions
   });
   const formData = new FormData();
-  const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([
+  const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array2(audio)]);
   formData.append("model", this.modelId);
   const fileExtension = mediaTypeToExtension(mediaType);
   formData.append(
```
```diff
@@ -2012,7 +2112,7 @@ var OpenAITranscriptionModel = class {
     value: response,
     responseHeaders,
     rawValue: rawResponse
-  } = await
+  } = await postFormDataToApi2({
     url: this.config.url({
       path: "/audio/transcriptions",
       modelId: this.modelId
```
```diff
@@ -3597,7 +3697,26 @@ var openaiResponsesProviderOptionsSchema = lazySchema15(
      * Defaults to `undefined`.
      * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
      */
-    user: z17.string().nullish()
+    user: z17.string().nullish(),
+    /**
+     * Override the system message mode for this model.
+     * - 'system': Use the 'system' role for system messages (default for most models)
+     * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+     * - 'remove': Remove system messages entirely
+     *
+     * If not specified, the mode is automatically determined based on the model.
+     */
+    systemMessageMode: z17.enum(["system", "developer", "remove"]).optional(),
+    /**
+     * Force treating this model as a reasoning model.
+     *
+     * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+     * where the model ID is not recognized by the SDK's allowlist.
+     *
+     * When enabled, the SDK applies reasoning-model parameter compatibility rules
+     * and defaults `systemMessageMode` to `developer` unless overridden.
+     */
+    forceReasoning: z17.boolean().optional()
   })
  )
);
```
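The same two options land on the Responses API provider options. For example, `systemMessageMode: "remove"` can strip system messages entirely for an endpoint that rejects them. A minimal sketch; the model ID is a placeholder:

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const { text } = await generateText({
  model: openai.responses("gpt-5.2"),
  system: "You are terse.",
  prompt: "One-line summary of this diff?",
  providerOptions: {
    openai: { systemMessageMode: "remove" }, // drop the system message entirely
  },
});
```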
```diff
@@ -4150,7 +4269,7 @@ var OpenAIResponsesLanguageModel = class {
     toolChoice,
     responseFormat
   }) {
-    var _a, _b, _c, _d;
+    var _a, _b, _c, _d, _e, _f;
     const warnings = [];
     const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
     if (topK != null) {
```
```diff
@@ -4173,6 +4292,7 @@ var OpenAIResponsesLanguageModel = class {
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
+    const isReasoningModel = (_a = openaiOptions == null ? void 0 : openaiOptions.forceReasoning) != null ? _a : modelCapabilities.isReasoningModel;
     if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
       warnings.push({
         type: "unsupported",
```
```diff
@@ -4197,15 +4317,15 @@ var OpenAIResponsesLanguageModel = class {
     const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
       toolNameMapping,
-      systemMessageMode: modelCapabilities.systemMessageMode,
+      systemMessageMode: (_b = openaiOptions == null ? void 0 : openaiOptions.systemMessageMode) != null ? _b : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode,
       fileIdPrefixes: this.config.fileIdPrefixes,
-      store: (
+      store: (_c = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _c : true,
       hasLocalShellTool: hasOpenAITool("openai.local_shell"),
       hasShellTool: hasOpenAITool("openai.shell"),
       hasApplyPatchTool: hasOpenAITool("openai.apply_patch")
     });
     warnings.push(...inputWarnings);
-    const strictJsonSchema = (
+    const strictJsonSchema = (_d = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _d : true;
     let include = openaiOptions == null ? void 0 : openaiOptions.include;
     function addInclude(key) {
       if (include == null) {
```
```diff
@@ -4221,9 +4341,9 @@ var OpenAIResponsesLanguageModel = class {
     if (topLogprobs) {
       addInclude("message.output_text.logprobs");
     }
-    const webSearchToolName = (
+    const webSearchToolName = (_e = tools == null ? void 0 : tools.find(
       (tool) => tool.type === "provider" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
-    )) == null ? void 0 :
+    )) == null ? void 0 : _e.name;
     if (webSearchToolName) {
       addInclude("web_search_call.action.sources");
     }
```
```diff
@@ -4231,7 +4351,7 @@ var OpenAIResponsesLanguageModel = class {
       addInclude("code_interpreter_call.outputs");
     }
     const store = openaiOptions == null ? void 0 : openaiOptions.store;
-    if (store === false &&
+    if (store === false && isReasoningModel) {
       addInclude("reasoning.encrypted_content");
     }
     const baseArgs = {
```
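The `store === false` branch now keys off the resolved `isReasoningModel` flag, so models marked via `forceReasoning` also get `reasoning.encrypted_content` added to `include` when responses are not stored server-side. A sketch of opting into that combination; the model ID is hypothetical:

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const { text } = await generateText({
  model: openai.responses("my-proxied-reasoner"), // unrecognized custom ID
  prompt: "Think it through.",
  providerOptions: {
    openai: {
      forceReasoning: true,
      store: false, // triggers include: ["reasoning.encrypted_content"]
    },
  },
});
```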
```diff
@@ -4246,7 +4366,7 @@ var OpenAIResponsesLanguageModel = class {
       format: responseFormat.schema != null ? {
         type: "json_schema",
         strict: strictJsonSchema,
-        name: (
+        name: (_f = responseFormat.name) != null ? _f : "response",
         description: responseFormat.description,
         schema: responseFormat.schema
       } : { type: "json_object" }
```
```diff
@@ -4273,7 +4393,7 @@ var OpenAIResponsesLanguageModel = class {
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
       // model-specific settings:
-      ...
+      ...isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
           ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
             effort: openaiOptions.reasoningEffort
```
```diff
@@ -4284,7 +4404,7 @@ var OpenAIResponsesLanguageModel = class {
         }
       }
     };
-    if (
+    if (isReasoningModel) {
       if (!((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) === "none" && modelCapabilities.supportsNonReasoningParameters)) {
         if (baseArgs.temperature != null) {
           baseArgs.temperature = void 0;
```