@ai-sdk/openai 3.0.0-beta.104 → 3.0.0-beta.106
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +15 -0
- package/dist/index.d.mts +4 -0
- package/dist/index.d.ts +4 -0
- package/dist/index.js +140 -24
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +148 -28
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +4 -2
- package/dist/internal/index.d.ts +4 -2
- package/dist/internal/index.js +139 -23
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +147 -27
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3

package/dist/internal/index.d.mts
CHANGED

```diff
@@ -19,6 +19,8 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<
     promptCacheKey?: string | undefined;
     promptCacheRetention?: "in_memory" | "24h" | undefined;
     safetyIdentifier?: string | undefined;
+    systemMessageMode?: "remove" | "system" | "developer" | undefined;
+    forceReasoning?: boolean | undefined;
 }>;
 type OpenAIChatLanguageModelOptions = InferSchema<typeof openaiChatLanguageModelOptions>;
 
@@ -131,7 +133,7 @@ declare class OpenAIImageModel implements ImageModelV3 {
     get maxImagesPerCall(): number;
     get provider(): string;
     constructor(modelId: OpenAIImageModelId, config: OpenAIImageModelConfig);
-    doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
+    doGenerate({ prompt, files, mask, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
 }
 
 type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
@@ -525,7 +527,7 @@ declare const fileSearch: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSc
 }>;
 
 declare const imageGenerationArgsSchema: _ai_sdk_provider_utils.LazySchema<{
-    background?: "auto" | "
+    background?: "auto" | "transparent" | "opaque" | undefined;
     inputFidelity?: "low" | "high" | undefined;
     inputImageMask?: {
         fileId?: string | undefined;
```
package/dist/internal/index.d.ts
CHANGED

```diff
@@ -19,6 +19,8 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<
     promptCacheKey?: string | undefined;
     promptCacheRetention?: "in_memory" | "24h" | undefined;
     safetyIdentifier?: string | undefined;
+    systemMessageMode?: "remove" | "system" | "developer" | undefined;
+    forceReasoning?: boolean | undefined;
 }>;
 type OpenAIChatLanguageModelOptions = InferSchema<typeof openaiChatLanguageModelOptions>;
 
@@ -131,7 +133,7 @@ declare class OpenAIImageModel implements ImageModelV3 {
     get maxImagesPerCall(): number;
     get provider(): string;
     constructor(modelId: OpenAIImageModelId, config: OpenAIImageModelConfig);
-    doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
+    doGenerate({ prompt, files, mask, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
 }
 
 type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
@@ -525,7 +527,7 @@ declare const fileSearch: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSc
 }>;
 
 declare const imageGenerationArgsSchema: _ai_sdk_provider_utils.LazySchema<{
-    background?: "auto" | "
+    background?: "auto" | "transparent" | "opaque" | undefined;
     inputFidelity?: "low" | "high" | undefined;
     inputImageMask?: {
         fileId?: string | undefined;
```
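
Both declaration files (`.d.mts` and `.d.ts`) pick up the same two optional fields. Assuming `OpenAIChatLanguageModelOptions` is exported from the package's internal entry point, as the declarations suggest, the change is visible at the type level; this is only an illustrative sketch:

```ts
import type { OpenAIChatLanguageModelOptions } from '@ai-sdk/openai/internal';

// Sketch: the new fields type-check as plain optional properties.
const options: OpenAIChatLanguageModelOptions = {
  systemMessageMode: 'developer', // 'remove' | 'system' | 'developer'
  forceReasoning: true,
};
```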
package/dist/internal/index.js
CHANGED

```diff
@@ -83,7 +83,7 @@ var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorRespo
 function getOpenAILanguageModelCapabilities(modelId) {
   const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
   const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
-  const isReasoningModel =
+  const isReasoningModel = modelId.startsWith("o1") || modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("codex-mini") || modelId.startsWith("computer-use-preview") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
   const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1") || modelId.startsWith("gpt-5.2");
   const systemMessageMode = isReasoningModel ? "developer" : "system";
   return {
@@ -583,7 +583,26 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazySchema)(
        * username or email address, in order to avoid sending us any identifying
        * information.
        */
-      safetyIdentifier: import_v43.z.string().optional()
+      safetyIdentifier: import_v43.z.string().optional(),
+      /**
+       * Override the system message mode for this model.
+       * - 'system': Use the 'system' role for system messages (default for most models)
+       * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+       * - 'remove': Remove system messages entirely
+       *
+       * If not specified, the mode is automatically determined based on the model.
+       */
+      systemMessageMode: import_v43.z.enum(["system", "developer", "remove"]).optional(),
+      /**
+       * Force treating this model as a reasoning model.
+       *
+       * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+       * where the model ID is not recognized by the SDK's allowlist.
+       *
+       * When enabled, the SDK applies reasoning-model parameter compatibility rules
+       * and defaults `systemMessageMode` to `developer` unless overridden.
+       */
+      forceReasoning: import_v43.z.boolean().optional()
     })
   )
 );
```
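
The runtime schema mirrors the declarations: both fields arrive through `providerOptions.openai`. A minimal usage sketch against the chat model, via the public AI SDK surface (the model ID is hypothetical):

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { text } = await generateText({
  // Hypothetical model ID served by a proxy; not on the SDK's allowlist.
  model: openai.chat('my-proxied-reasoning-model'),
  system: 'You are terse.',
  prompt: 'Summarize this release in one line.',
  providerOptions: {
    openai: {
      forceReasoning: true, // apply reasoning-model parameter rules
      systemMessageMode: 'system', // but keep the plain 'system' role
    },
  },
});
```

As the wiring below shows, an explicit `systemMessageMode` always wins; with `forceReasoning: true` and no explicit mode, system messages default to the 'developer' role.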

```diff
@@ -678,7 +697,7 @@ var OpenAIChatLanguageModel = class {
     toolChoice,
     providerOptions
   }) {
-    var _a, _b, _c;
+    var _a, _b, _c, _d, _e;
     const warnings = [];
     const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
       provider: "openai",
@@ -686,17 +705,18 @@
       schema: openaiChatLanguageModelOptions
     })) != null ? _a : {};
     const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
+    const isReasoningModel = (_b = openaiOptions.forceReasoning) != null ? _b : modelCapabilities.isReasoningModel;
     if (topK != null) {
       warnings.push({ type: "unsupported", feature: "topK" });
     }
     const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
       {
         prompt,
-        systemMessageMode: modelCapabilities.systemMessageMode
+        systemMessageMode: (_c = openaiOptions.systemMessageMode) != null ? _c : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode
       }
     );
     warnings.push(...messageWarnings);
-    const strictJsonSchema = (
+    const strictJsonSchema = (_d = openaiOptions.strictJsonSchema) != null ? _d : true;
     const baseArgs = {
       // model id:
       model: this.modelId,
@@ -717,7 +737,7 @@
           json_schema: {
             schema: responseFormat.schema,
             strict: strictJsonSchema,
-            name: (
+            name: (_e = responseFormat.name) != null ? _e : "response",
             description: responseFormat.description
           }
         } : { type: "json_object" } : void 0,
@@ -738,7 +758,7 @@
       // messages:
       messages
     };
-    if (
+    if (isReasoningModel) {
       if (openaiOptions.reasoningEffort !== "none" || !modelCapabilities.supportsNonReasoningParameters) {
         if (baseArgs.temperature != null) {
           baseArgs.temperature = void 0;
@@ -1758,6 +1778,8 @@ var OpenAIImageModel = class {
   }
   async doGenerate({
     prompt,
+    files,
+    mask,
     n,
     size,
     aspectRatio,
@@ -1766,7 +1788,7 @@
     headers,
     abortSignal
   }) {
-    var _a, _b, _c, _d, _e, _f, _g;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
    const warnings = [];
     if (aspectRatio != null) {
       warnings.push({
@@ -1779,6 +1801,72 @@
       warnings.push({ type: "unsupported", feature: "seed" });
     }
     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+    if (files != null) {
+      const { value: response2, responseHeaders: responseHeaders2 } = await (0, import_provider_utils13.postFormDataToApi)({
+        url: this.config.url({
+          path: "/images/edits",
+          modelId: this.modelId
+        }),
+        headers: (0, import_provider_utils13.combineHeaders)(this.config.headers(), headers),
+        formData: (0, import_provider_utils13.convertToFormData)({
+          model: this.modelId,
+          prompt,
+          image: await Promise.all(
+            files.map(
+              (file) => file.type === "file" ? new Blob(
+                [
+                  file.data instanceof Uint8Array ? new Blob([file.data], {
+                    type: file.mediaType
+                  }) : new Blob([(0, import_provider_utils13.convertBase64ToUint8Array)(file.data)], {
+                    type: file.mediaType
+                  })
+                ],
+                { type: file.mediaType }
+              ) : (0, import_provider_utils13.downloadBlob)(file.url)
+            )
+          ),
+          mask: mask != null ? await fileToBlob(mask) : void 0,
+          n,
+          size,
+          ...(_d = providerOptions.openai) != null ? _d : {}
+        }),
+        failedResponseHandler: openaiFailedResponseHandler,
+        successfulResponseHandler: (0, import_provider_utils13.createJsonResponseHandler)(
+          openaiImageResponseSchema
+        ),
+        abortSignal,
+        fetch: this.config.fetch
+      });
+      return {
+        images: response2.data.map((item) => item.b64_json),
+        warnings,
+        usage: response2.usage != null ? {
+          inputTokens: (_e = response2.usage.input_tokens) != null ? _e : void 0,
+          outputTokens: (_f = response2.usage.output_tokens) != null ? _f : void 0,
+          totalTokens: (_g = response2.usage.total_tokens) != null ? _g : void 0
+        } : void 0,
+        response: {
+          timestamp: currentDate,
+          modelId: this.modelId,
+          headers: responseHeaders2
+        },
+        providerMetadata: {
+          openai: {
+            images: response2.data.map((item) => {
+              var _a2, _b2, _c2, _d2, _e2;
+              return {
+                ...item.revised_prompt ? { revisedPrompt: item.revised_prompt } : {},
+                created: (_a2 = response2.created) != null ? _a2 : void 0,
+                size: (_b2 = response2.size) != null ? _b2 : void 0,
+                quality: (_c2 = response2.quality) != null ? _c2 : void 0,
+                background: (_d2 = response2.background) != null ? _d2 : void 0,
+                outputFormat: (_e2 = response2.output_format) != null ? _e2 : void 0
+              };
+            })
+          }
+        }
+      };
+    }
     const { value: response, responseHeaders } = await (0, import_provider_utils13.postJsonToApi)({
       url: this.config.url({
         path: "/images/generations",
@@ -1790,7 +1878,7 @@
         prompt,
         n,
         size,
-        ...(
+        ...(_h = providerOptions.openai) != null ? _h : {},
         ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
       },
       failedResponseHandler: openaiFailedResponseHandler,
@@ -1804,9 +1892,9 @@
       images: response.data.map((item) => item.b64_json),
       warnings,
       usage: response.usage != null ? {
-        inputTokens: (
-        outputTokens: (
-        totalTokens: (
+        inputTokens: (_i = response.usage.input_tokens) != null ? _i : void 0,
+        outputTokens: (_j = response.usage.output_tokens) != null ? _j : void 0,
+        totalTokens: (_k = response.usage.total_tokens) != null ? _k : void 0
       } : void 0,
       response: {
         timestamp: currentDate,
@@ -1831,6 +1919,14 @@
     };
   }
 };
+async function fileToBlob(file) {
+  if (!file) return void 0;
+  if (file.type === "url") {
+    return (0, import_provider_utils13.downloadBlob)(file.url);
+  }
+  const data = file.data instanceof Uint8Array ? file.data : (0, import_provider_utils13.convertBase64ToUint8Array)(file.data);
+  return new Blob([data], { type: file.mediaType });
+}
 
 // src/transcription/openai-transcription-model.ts
 var import_provider_utils16 = require("@ai-sdk/provider-utils");
```
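
With `files` and `mask` plumbed through `doGenerate`, the image model posts multipart form data to `/images/edits` whenever `files` is present, and falls back to the JSON `/images/generations` call otherwise. A sketch of a direct call, using the file shapes the code above handles (`{ type: 'file', data, mediaType }` or `{ type: 'url', url }`); the model ID and file names are illustrative:

```ts
import { readFileSync } from 'node:fs';
import { openai } from '@ai-sdk/openai';

const model = openai.image('gpt-image-1');

// Passing `files` routes the request to /images/edits instead of /images/generations.
const { images, warnings } = await model.doGenerate({
  prompt: 'Replace the masked region with a sunset sky.',
  files: [
    {
      type: 'file',
      data: new Uint8Array(readFileSync('photo.png')), // or a base64 string
      mediaType: 'image/png',
    },
  ],
  mask: {
    type: 'file',
    data: new Uint8Array(readFileSync('mask.png')),
    mediaType: 'image/png',
  },
  n: 1,
  size: '1024x1024',
  aspectRatio: undefined,
  seed: undefined,
  providerOptions: { openai: {} },
  headers: undefined,
  abortSignal: undefined,
});
```

Note that `providerOptions.openai` is spread directly into the form fields, so image-edit options ride along unchanged.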

```diff
@@ -3580,7 +3676,26 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils24.lazySchem
        * Defaults to `undefined`.
        * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
        */
-      user: import_v417.z.string().nullish()
+      user: import_v417.z.string().nullish(),
+      /**
+       * Override the system message mode for this model.
+       * - 'system': Use the 'system' role for system messages (default for most models)
+       * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+       * - 'remove': Remove system messages entirely
+       *
+       * If not specified, the mode is automatically determined based on the model.
+       */
+      systemMessageMode: import_v417.z.enum(["system", "developer", "remove"]).optional(),
+      /**
+       * Force treating this model as a reasoning model.
+       *
+       * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+       * where the model ID is not recognized by the SDK's allowlist.
+       *
+       * When enabled, the SDK applies reasoning-model parameter compatibility rules
+       * and defaults `systemMessageMode` to `developer` unless overridden.
+       */
+      forceReasoning: import_v417.z.boolean().optional()
     })
   )
 );
```
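
The Responses provider options gain the identical pair, which matters most for the "stealth" case the docstring names: a reasoning model reached through a custom `baseURL` under an ID the capability detection cannot recognize. A hedged sketch (gateway URL and model ID are invented):

```ts
import { createOpenAI } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Hypothetical gateway that fronts a reasoning model under a custom ID.
const gateway = createOpenAI({
  baseURL: 'https://llm-gateway.example.com/v1',
  apiKey: process.env.GATEWAY_API_KEY ?? '',
});

const { text } = await generateText({
  model: gateway.responses('stealth-reasoner-v2'), // hypothetical model ID
  system: 'Answer in one sentence.',
  prompt: 'Why did the nightly build fail?',
  providerOptions: {
    openai: {
      // Without this, the unrecognized ID would be treated as a
      // non-reasoning model and reasoning options would be dropped.
      forceReasoning: true,
      reasoningEffort: 'low',
    },
  },
});
```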
```diff
@@ -4107,7 +4222,7 @@ var OpenAIResponsesLanguageModel = class {
     toolChoice,
     responseFormat
   }) {
-    var _a, _b, _c, _d;
+    var _a, _b, _c, _d, _e, _f;
     const warnings = [];
     const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
     if (topK != null) {
@@ -4130,6 +4245,7 @@
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
+    const isReasoningModel = (_a = openaiOptions == null ? void 0 : openaiOptions.forceReasoning) != null ? _a : modelCapabilities.isReasoningModel;
     if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
       warnings.push({
         type: "unsupported",
@@ -4154,15 +4270,15 @@
     const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
       toolNameMapping,
-      systemMessageMode: modelCapabilities.systemMessageMode,
+      systemMessageMode: (_b = openaiOptions == null ? void 0 : openaiOptions.systemMessageMode) != null ? _b : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode,
       fileIdPrefixes: this.config.fileIdPrefixes,
-      store: (
+      store: (_c = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _c : true,
       hasLocalShellTool: hasOpenAITool("openai.local_shell"),
       hasShellTool: hasOpenAITool("openai.shell"),
       hasApplyPatchTool: hasOpenAITool("openai.apply_patch")
     });
     warnings.push(...inputWarnings);
-    const strictJsonSchema = (
+    const strictJsonSchema = (_d = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _d : true;
     let include = openaiOptions == null ? void 0 : openaiOptions.include;
     function addInclude(key) {
       if (include == null) {
@@ -4178,9 +4294,9 @@
     if (topLogprobs) {
       addInclude("message.output_text.logprobs");
     }
-    const webSearchToolName = (
+    const webSearchToolName = (_e = tools == null ? void 0 : tools.find(
       (tool) => tool.type === "provider" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
-    )) == null ? void 0 :
+    )) == null ? void 0 : _e.name;
     if (webSearchToolName) {
       addInclude("web_search_call.action.sources");
     }
@@ -4188,7 +4304,7 @@
       addInclude("code_interpreter_call.outputs");
     }
     const store = openaiOptions == null ? void 0 : openaiOptions.store;
-    if (store === false &&
+    if (store === false && isReasoningModel) {
       addInclude("reasoning.encrypted_content");
     }
     const baseArgs = {
@@ -4203,7 +4319,7 @@
       format: responseFormat.schema != null ? {
         type: "json_schema",
         strict: strictJsonSchema,
-        name: (
+        name: (_f = responseFormat.name) != null ? _f : "response",
         description: responseFormat.description,
         schema: responseFormat.schema
       } : { type: "json_object" }
@@ -4230,7 +4346,7 @@
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
       // model-specific settings:
-      ...
+      ...isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
       reasoning: {
         ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
           effort: openaiOptions.reasoningEffort
@@ -4241,7 +4357,7 @@
         }
       }
     };
-    if (
+    if (isReasoningModel) {
       if (!((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) === "none" && modelCapabilities.supportsNonReasoningParameters)) {
         if (baseArgs.temperature != null) {
           baseArgs.temperature = void 0;
```