@ai-sdk/openai 3.0.0-beta.102 → 3.0.0-beta.105
- package/CHANGELOG.md +21 -0
- package/dist/index.d.mts +13 -1
- package/dist/index.d.ts +13 -1
- package/dist/index.js +146 -26
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +154 -30
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +5 -3
- package/dist/internal/index.d.ts +5 -3
- package/dist/internal/index.js +143 -25
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +151 -29
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/internal/index.mjs
CHANGED
@@ -35,7 +35,7 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
 function getOpenAILanguageModelCapabilities(modelId) {
   const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
   const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
-  const isReasoningModel =
+  const isReasoningModel = modelId.startsWith("o1") || modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("codex-mini") || modelId.startsWith("computer-use-preview") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
   const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1") || modelId.startsWith("gpt-5.2");
   const systemMessageMode = isReasoningModel ? "developer" : "system";
   return {
@@ -537,7 +537,26 @@ var openaiChatLanguageModelOptions = lazySchema2(
      * username or email address, in order to avoid sending us any identifying
      * information.
      */
-    safetyIdentifier: z3.string().optional()
+    safetyIdentifier: z3.string().optional(),
+    /**
+     * Override the system message mode for this model.
+     * - 'system': Use the 'system' role for system messages (default for most models)
+     * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+     * - 'remove': Remove system messages entirely
+     *
+     * If not specified, the mode is automatically determined based on the model.
+     */
+    systemMessageMode: z3.enum(["system", "developer", "remove"]).optional(),
+    /**
+     * Force treating this model as a reasoning model.
+     *
+     * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+     * where the model ID is not recognized by the SDK's allowlist.
+     *
+     * When enabled, the SDK applies reasoning-model parameter compatibility rules
+     * and defaults `systemMessageMode` to `developer` unless overridden.
+     */
+    forceReasoning: z3.boolean().optional()
   })
 )
 );
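Both new chat options are passed through `providerOptions.openai`. A minimal sketch of opting a custom-hosted model into reasoning behavior (the base URL and model ID below are placeholders, not real endpoints):

```ts
import { createOpenAI } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Placeholder proxy serving a "stealth" reasoning model whose ID the
// SDK's allowlist does not recognize.
const openai = createOpenAI({ baseURL: 'https://llm-proxy.example.com/v1' });

const { text } = await generateText({
  model: openai.chat('stealth-reasoner-1'), // hypothetical model ID
  prompt: 'Summarize the release notes.',
  providerOptions: {
    openai: {
      forceReasoning: true, // apply reasoning-model parameter rules
      // systemMessageMode defaults to 'developer' once reasoning is forced;
      // set 'system' or 'remove' explicitly to override.
    },
  },
});
```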
@@ -634,7 +653,7 @@ var OpenAIChatLanguageModel = class {
     toolChoice,
     providerOptions
   }) {
-    var _a, _b, _c;
+    var _a, _b, _c, _d, _e;
     const warnings = [];
     const openaiOptions = (_a = await parseProviderOptions({
       provider: "openai",
@@ -642,17 +661,18 @@ var OpenAIChatLanguageModel = class {
       schema: openaiChatLanguageModelOptions
     })) != null ? _a : {};
     const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
+    const isReasoningModel = (_b = openaiOptions.forceReasoning) != null ? _b : modelCapabilities.isReasoningModel;
     if (topK != null) {
       warnings.push({ type: "unsupported", feature: "topK" });
     }
     const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
       {
         prompt,
-        systemMessageMode: modelCapabilities.systemMessageMode
+        systemMessageMode: (_c = openaiOptions.systemMessageMode) != null ? _c : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode
       }
     );
     warnings.push(...messageWarnings);
-    const strictJsonSchema = (
+    const strictJsonSchema = (_d = openaiOptions.strictJsonSchema) != null ? _d : true;
     const baseArgs = {
       // model id:
       model: this.modelId,
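The compiled `(_b = x) != null ? _b : y` chains above de-sugar to a simple precedence order. A readable reconstruction (illustrative, not the package's actual TypeScript source):

```ts
type SystemMessageMode = 'system' | 'developer' | 'remove';

// Reconstruction of the fallback chains in the compiled output above:
function resolveChatDefaults(
  openaiOptions: {
    forceReasoning?: boolean;
    systemMessageMode?: SystemMessageMode;
    strictJsonSchema?: boolean;
  },
  modelCapabilities: {
    isReasoningModel: boolean;
    systemMessageMode: SystemMessageMode;
  },
) {
  const isReasoningModel =
    openaiOptions.forceReasoning ?? modelCapabilities.isReasoningModel;
  return {
    isReasoningModel,
    // 1. explicit override wins, 2. (forced) reasoning models default to
    // 'developer', 3. otherwise the capability table decides.
    systemMessageMode:
      openaiOptions.systemMessageMode ??
      (isReasoningModel ? 'developer' : modelCapabilities.systemMessageMode),
    // strict JSON schema still defaults to true
    strictJsonSchema: openaiOptions.strictJsonSchema ?? true,
  };
}
```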
@@ -673,7 +693,7 @@ var OpenAIChatLanguageModel = class {
           json_schema: {
             schema: responseFormat.schema,
             strict: strictJsonSchema,
-            name: (
+            name: (_e = responseFormat.name) != null ? _e : "response",
             description: responseFormat.description
           }
         } : { type: "json_object" } : void 0,
@@ -694,7 +714,7 @@ var OpenAIChatLanguageModel = class {
       // messages:
       messages
     };
-    if (
+    if (isReasoningModel) {
       if (openaiOptions.reasoningEffort !== "none" || !modelCapabilities.supportsNonReasoningParameters) {
         if (baseArgs.temperature != null) {
           baseArgs.temperature = void 0;
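This branch strips sampling parameters that reasoning models reject, and with `forceReasoning` it now applies to unrecognized model IDs too. Per the condition above, only gpt-5.1/gpt-5.2 with `reasoningEffort: 'none'` keep them. A sketch of the observable effect (reusing the imports from the earlier example):

```ts
// temperature is silently dropped (with a warning) for reasoning models;
// on gpt-5.1 / gpt-5.2, reasoningEffort 'none' lets it pass through.
await generateText({
  model: openai.chat('gpt-5.1'),
  temperature: 0.7, // kept: non-reasoning parameters allowed with effort 'none'
  providerOptions: { openai: { reasoningEffort: 'none' } },
  prompt: 'Write a haiku.',
});
```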
@@ -1670,7 +1690,11 @@ var OpenAIEmbeddingModel = class {
 // src/image/openai-image-model.ts
 import {
   combineHeaders as combineHeaders4,
+  convertBase64ToUint8Array,
+  convertToFormData,
   createJsonResponseHandler as createJsonResponseHandler4,
+  downloadBlob,
+  postFormDataToApi,
   postJsonToApi as postJsonToApi4
 } from "@ai-sdk/provider-utils";
 
@@ -1709,11 +1733,13 @@ var modelMaxImagesPerCall = {
   "dall-e-3": 1,
   "dall-e-2": 10,
   "gpt-image-1": 10,
-  "gpt-image-1-mini": 10
+  "gpt-image-1-mini": 10,
+  "gpt-image-1.5": 10
 };
 var hasDefaultResponseFormat = /* @__PURE__ */ new Set([
   "gpt-image-1",
-  "gpt-image-1-mini"
+  "gpt-image-1-mini",
+  "gpt-image-1.5"
 ]);
 
 // src/image/openai-image-model.ts
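These two tables register the new gpt-image-1.5 model ID: up to 10 images per call, and no explicit `response_format` parameter (it always returns base64). Usage is otherwise unchanged:

```ts
const model = openai.image('gpt-image-1.5'); // newly registered; capped at 10 images per call
```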
@@ -1732,6 +1758,8 @@ var OpenAIImageModel = class {
   }
   async doGenerate({
     prompt,
+    files,
+    mask,
     n,
     size,
     aspectRatio,
@@ -1740,7 +1768,7 @@ var OpenAIImageModel = class {
     headers,
     abortSignal
   }) {
-    var _a, _b, _c, _d, _e, _f, _g;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
     const warnings = [];
     if (aspectRatio != null) {
       warnings.push({
@@ -1753,6 +1781,72 @@ var OpenAIImageModel = class {
       warnings.push({ type: "unsupported", feature: "seed" });
     }
     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+    if (files != null) {
+      const { value: response2, responseHeaders: responseHeaders2 } = await postFormDataToApi({
+        url: this.config.url({
+          path: "/images/edits",
+          modelId: this.modelId
+        }),
+        headers: combineHeaders4(this.config.headers(), headers),
+        formData: convertToFormData({
+          model: this.modelId,
+          prompt,
+          image: await Promise.all(
+            files.map(
+              (file) => file.type === "file" ? new Blob(
+                [
+                  file.data instanceof Uint8Array ? new Blob([file.data], {
+                    type: file.mediaType
+                  }) : new Blob([convertBase64ToUint8Array(file.data)], {
+                    type: file.mediaType
+                  })
+                ],
+                { type: file.mediaType }
+              ) : downloadBlob(file.url)
+            )
+          ),
+          mask: mask != null ? await fileToBlob(mask) : void 0,
+          n,
+          size,
+          ...(_d = providerOptions.openai) != null ? _d : {}
+        }),
+        failedResponseHandler: openaiFailedResponseHandler,
+        successfulResponseHandler: createJsonResponseHandler4(
+          openaiImageResponseSchema
+        ),
+        abortSignal,
+        fetch: this.config.fetch
+      });
+      return {
+        images: response2.data.map((item) => item.b64_json),
+        warnings,
+        usage: response2.usage != null ? {
+          inputTokens: (_e = response2.usage.input_tokens) != null ? _e : void 0,
+          outputTokens: (_f = response2.usage.output_tokens) != null ? _f : void 0,
+          totalTokens: (_g = response2.usage.total_tokens) != null ? _g : void 0
+        } : void 0,
+        response: {
+          timestamp: currentDate,
+          modelId: this.modelId,
+          headers: responseHeaders2
+        },
+        providerMetadata: {
+          openai: {
+            images: response2.data.map((item) => {
+              var _a2, _b2, _c2, _d2, _e2;
+              return {
+                ...item.revised_prompt ? { revisedPrompt: item.revised_prompt } : {},
+                created: (_a2 = response2.created) != null ? _a2 : void 0,
+                size: (_b2 = response2.size) != null ? _b2 : void 0,
+                quality: (_c2 = response2.quality) != null ? _c2 : void 0,
+                background: (_d2 = response2.background) != null ? _d2 : void 0,
+                outputFormat: (_e2 = response2.output_format) != null ? _e2 : void 0
+              };
+            })
+          }
+        }
+      };
+    }
     const { value: response, responseHeaders } = await postJsonToApi4({
       url: this.config.url({
         path: "/images/generations",
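When `files` is present, the request is routed to `/images/edits` as multipart form data instead of `/images/generations`. A sketch invoking the image model interface directly, assuming the image-model call-options shape in this beta (in app code you would normally go through the AI SDK's image generation helper, whose surface for edits may differ):

```ts
import { readFile } from 'node:fs/promises';
import { createOpenAI } from '@ai-sdk/openai';

const openai = createOpenAI();
const model = openai.image('gpt-image-1');

// `files` and `mask` accept the two shapes the compiled branch handles:
// { type: 'file', data: Uint8Array | base64 string, mediaType } or
// { type: 'url', url }.
const result = await model.doGenerate({
  prompt: 'Replace the sky with a clear sunset',
  files: [
    {
      type: 'file',
      data: new Uint8Array(await readFile('photo.png')),
      mediaType: 'image/png',
    },
  ],
  mask: { type: 'url', url: 'https://example.com/mask.png' }, // placeholder URL
  n: 1,
  size: '1024x1024',
  aspectRatio: undefined,
  seed: undefined,
  providerOptions: {},
  headers: undefined,
  abortSignal: undefined,
});
// result.images holds the base64-encoded edited images.
```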
@@ -1764,7 +1858,7 @@ var OpenAIImageModel = class {
         prompt,
         n,
         size,
-        ...(
+        ...(_h = providerOptions.openai) != null ? _h : {},
         ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
       },
       failedResponseHandler: openaiFailedResponseHandler,
@@ -1778,9 +1872,9 @@ var OpenAIImageModel = class {
       images: response.data.map((item) => item.b64_json),
       warnings,
       usage: response.usage != null ? {
-        inputTokens: (
-        outputTokens: (
-        totalTokens: (
+        inputTokens: (_i = response.usage.input_tokens) != null ? _i : void 0,
+        outputTokens: (_j = response.usage.output_tokens) != null ? _j : void 0,
+        totalTokens: (_k = response.usage.total_tokens) != null ? _k : void 0
       } : void 0,
       response: {
         timestamp: currentDate,
@@ -1805,15 +1899,23 @@ var OpenAIImageModel = class {
     };
   }
 };
+async function fileToBlob(file) {
+  if (!file) return void 0;
+  if (file.type === "url") {
+    return downloadBlob(file.url);
+  }
+  const data = file.data instanceof Uint8Array ? file.data : convertBase64ToUint8Array(file.data);
+  return new Blob([data], { type: file.mediaType });
+}
 
 // src/transcription/openai-transcription-model.ts
 import {
   combineHeaders as combineHeaders5,
-  convertBase64ToUint8Array,
+  convertBase64ToUint8Array as convertBase64ToUint8Array2,
   createJsonResponseHandler as createJsonResponseHandler5,
   mediaTypeToExtension,
   parseProviderOptions as parseProviderOptions4,
-  postFormDataToApi
+  postFormDataToApi as postFormDataToApi2
 } from "@ai-sdk/provider-utils";
 
 // src/transcription/openai-transcription-api.ts
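The new `fileToBlob` helper normalizes both file shapes to a `Blob`. A typed, readable equivalent of the compiled output above (the type name is an assumption; the logic mirrors the dist code):

```ts
// Hypothetical type name for the two shapes fileToBlob accepts:
type ImageModelFile =
  | { type: 'url'; url: string }
  | { type: 'file'; data: Uint8Array | string; mediaType: string }; // string = base64

async function fileToBlob(file?: ImageModelFile): Promise<Blob | undefined> {
  if (!file) return undefined;
  if (file.type === 'url') {
    return downloadBlob(file.url); // fetches the remote file into a Blob
  }
  const data =
    file.data instanceof Uint8Array
      ? file.data
      : convertBase64ToUint8Array(file.data); // decode base64 payloads
  return new Blob([data], { type: file.mediaType });
}
```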
@@ -1963,7 +2065,7 @@ var OpenAITranscriptionModel = class {
       schema: openAITranscriptionProviderOptions
     });
     const formData = new FormData();
-    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([
+    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array2(audio)]);
     formData.append("model", this.modelId);
     const fileExtension = mediaTypeToExtension(mediaType);
     formData.append(
@@ -2010,7 +2112,7 @@ var OpenAITranscriptionModel = class {
       value: response,
       responseHeaders,
       rawValue: rawResponse
-    } = await
+    } = await postFormDataToApi2({
       url: this.config.url({
         path: "/audio/transcriptions",
         modelId: this.modelId
@@ -3595,7 +3697,26 @@ var openaiResponsesProviderOptionsSchema = lazySchema15(
      * Defaults to `undefined`.
      * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
      */
-    user: z17.string().nullish()
+    user: z17.string().nullish(),
+    /**
+     * Override the system message mode for this model.
+     * - 'system': Use the 'system' role for system messages (default for most models)
+     * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+     * - 'remove': Remove system messages entirely
+     *
+     * If not specified, the mode is automatically determined based on the model.
+     */
+    systemMessageMode: z17.enum(["system", "developer", "remove"]).optional(),
+    /**
+     * Force treating this model as a reasoning model.
+     *
+     * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+     * where the model ID is not recognized by the SDK's allowlist.
+     *
+     * When enabled, the SDK applies reasoning-model parameter compatibility rules
+     * and defaults `systemMessageMode` to `developer` unless overridden.
+     */
+    forceReasoning: z17.boolean().optional()
   })
 )
 );
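The Responses API gains the same two options. For example, `systemMessageMode: 'remove'` drops system messages before the request is built (sketch; the model ID is illustrative, and the imports match the earlier example):

```ts
await generateText({
  model: openai.responses('gpt-4o'),  // illustrative model ID
  system: 'You are terse.',           // dropped entirely with mode 'remove'
  prompt: 'Status update, please.',
  providerOptions: {
    openai: { systemMessageMode: 'remove' },
  },
});
```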
@@ -4148,7 +4269,7 @@ var OpenAIResponsesLanguageModel = class {
     toolChoice,
     responseFormat
   }) {
-    var _a, _b, _c, _d;
+    var _a, _b, _c, _d, _e, _f;
     const warnings = [];
     const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
     if (topK != null) {
@@ -4171,6 +4292,7 @@ var OpenAIResponsesLanguageModel = class {
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
+    const isReasoningModel = (_a = openaiOptions == null ? void 0 : openaiOptions.forceReasoning) != null ? _a : modelCapabilities.isReasoningModel;
     if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
       warnings.push({
         type: "unsupported",
@@ -4195,15 +4317,15 @@ var OpenAIResponsesLanguageModel = class {
     const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
       toolNameMapping,
-      systemMessageMode: modelCapabilities.systemMessageMode,
+      systemMessageMode: (_b = openaiOptions == null ? void 0 : openaiOptions.systemMessageMode) != null ? _b : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode,
       fileIdPrefixes: this.config.fileIdPrefixes,
-      store: (
+      store: (_c = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _c : true,
       hasLocalShellTool: hasOpenAITool("openai.local_shell"),
       hasShellTool: hasOpenAITool("openai.shell"),
       hasApplyPatchTool: hasOpenAITool("openai.apply_patch")
     });
     warnings.push(...inputWarnings);
-    const strictJsonSchema = (
+    const strictJsonSchema = (_d = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _d : true;
     let include = openaiOptions == null ? void 0 : openaiOptions.include;
     function addInclude(key) {
       if (include == null) {
@@ -4219,9 +4341,9 @@ var OpenAIResponsesLanguageModel = class {
     if (topLogprobs) {
       addInclude("message.output_text.logprobs");
     }
-    const webSearchToolName = (
+    const webSearchToolName = (_e = tools == null ? void 0 : tools.find(
       (tool) => tool.type === "provider" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
-    )) == null ? void 0 :
+    )) == null ? void 0 : _e.name;
     if (webSearchToolName) {
       addInclude("web_search_call.action.sources");
     }
@@ -4229,7 +4351,7 @@ var OpenAIResponsesLanguageModel = class {
       addInclude("code_interpreter_call.outputs");
     }
     const store = openaiOptions == null ? void 0 : openaiOptions.store;
-    if (store === false &&
+    if (store === false && isReasoningModel) {
       addInclude("reasoning.encrypted_content");
     }
     const baseArgs = {
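Because this condition now uses the (possibly forced) `isReasoningModel` flag, stateless calls against an unrecognized reasoning model also request `reasoning.encrypted_content`, letting reasoning state round-trip between requests. Sketch (placeholder model ID, same setup as above):

```ts
await generateText({
  model: openai.responses('stealth-reasoner-1'), // hypothetical model ID
  prompt: 'Continue the analysis.',
  providerOptions: {
    openai: {
      forceReasoning: true,
      store: false, // stateless: the request now includes reasoning.encrypted_content
    },
  },
});
```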
@@ -4244,7 +4366,7 @@ var OpenAIResponsesLanguageModel = class {
         format: responseFormat.schema != null ? {
           type: "json_schema",
           strict: strictJsonSchema,
-          name: (
+          name: (_f = responseFormat.name) != null ? _f : "response",
           description: responseFormat.description,
           schema: responseFormat.schema
         } : { type: "json_object" }
@@ -4271,7 +4393,7 @@ var OpenAIResponsesLanguageModel = class {
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
       // model-specific settings:
-      ...
+      ...isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
           ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
             effort: openaiOptions.reasoningEffort
@@ -4282,7 +4404,7 @@ var OpenAIResponsesLanguageModel = class {
         }
       }
     };
-    if (
+    if (isReasoningModel) {
       if (!((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) === "none" && modelCapabilities.supportsNonReasoningParameters)) {
         if (baseArgs.temperature != null) {
           baseArgs.temperature = void 0;