@ai-sdk/openai 3.0.0-beta.104 → 3.0.0-beta.105
- package/CHANGELOG.md +6 -0
- package/dist/index.d.mts +4 -0
- package/dist/index.d.ts +4 -0
- package/dist/index.js +140 -24
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +148 -28
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +4 -2
- package/dist/internal/index.d.ts +4 -2
- package/dist/internal/index.js +139 -23
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +147 -27
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/CHANGELOG.md
CHANGED
package/dist/index.d.mts
CHANGED
@@ -20,6 +20,8 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<
     promptCacheKey?: string | undefined;
     promptCacheRetention?: "in_memory" | "24h" | undefined;
     safetyIdentifier?: string | undefined;
+    systemMessageMode?: "remove" | "system" | "developer" | undefined;
+    forceReasoning?: boolean | undefined;
 }>;
 type OpenAIChatLanguageModelOptions = InferSchema<typeof openaiChatLanguageModelOptions>;

@@ -464,6 +466,8 @@ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyS
     textVerbosity?: "low" | "medium" | "high" | null | undefined;
     truncation?: "auto" | "disabled" | null | undefined;
     user?: string | null | undefined;
+    systemMessageMode?: "remove" | "system" | "developer" | undefined;
+    forceReasoning?: boolean | undefined;
 }>;
 type OpenAIResponsesProviderOptions = InferSchema<typeof openaiResponsesProviderOptionsSchema>;
package/dist/index.d.ts
CHANGED
@@ -20,6 +20,8 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<
     promptCacheKey?: string | undefined;
     promptCacheRetention?: "in_memory" | "24h" | undefined;
     safetyIdentifier?: string | undefined;
+    systemMessageMode?: "remove" | "system" | "developer" | undefined;
+    forceReasoning?: boolean | undefined;
 }>;
 type OpenAIChatLanguageModelOptions = InferSchema<typeof openaiChatLanguageModelOptions>;

@@ -464,6 +466,8 @@ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyS
     textVerbosity?: "low" | "medium" | "high" | null | undefined;
     truncation?: "auto" | "disabled" | null | undefined;
     user?: string | null | undefined;
+    systemMessageMode?: "remove" | "system" | "developer" | undefined;
+    forceReasoning?: boolean | undefined;
 }>;
 type OpenAIResponsesProviderOptions = InferSchema<typeof openaiResponsesProviderOptionsSchema>;
package/dist/index.js
CHANGED
@@ -56,7 +56,7 @@ var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorRespo
 function getOpenAILanguageModelCapabilities(modelId) {
   const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
   const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
-  const isReasoningModel =
+  const isReasoningModel = modelId.startsWith("o1") || modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("codex-mini") || modelId.startsWith("computer-use-preview") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
   const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1") || modelId.startsWith("gpt-5.2");
   const systemMessageMode = isReasoningModel ? "developer" : "system";
   return {
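
In plain terms, the rewritten check treats any model ID prefixed with o1, o3, o4-mini, codex-mini, or computer-use-preview as a reasoning model, plus gpt-5 except its gpt-5-chat variants. A standalone TypeScript sketch of that predicate (the real function is internal to the bundle and not exported; this merely restates the hunk above):

```ts
// Sketch of the prefix-based detection added in this release; not a public API.
const isReasoningModelId = (modelId: string): boolean =>
  modelId.startsWith('o1') ||
  modelId.startsWith('o3') ||
  modelId.startsWith('o4-mini') ||
  modelId.startsWith('codex-mini') ||
  modelId.startsWith('computer-use-preview') ||
  (modelId.startsWith('gpt-5') && !modelId.startsWith('gpt-5-chat'));

isReasoningModelId('o4-mini');    // => true
isReasoningModelId('gpt-5-chat'); // => false (chat variant is excluded)
isReasoningModelId('my-custom');  // => false -- the new forceReasoning option covers this case
```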
@@ -556,7 +556,26 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazySchema)(
        * username or email address, in order to avoid sending us any identifying
        * information.
        */
-      safetyIdentifier: import_v43.z.string().optional()
+      safetyIdentifier: import_v43.z.string().optional(),
+      /**
+       * Override the system message mode for this model.
+       * - 'system': Use the 'system' role for system messages (default for most models)
+       * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+       * - 'remove': Remove system messages entirely
+       *
+       * If not specified, the mode is automatically determined based on the model.
+       */
+      systemMessageMode: import_v43.z.enum(["system", "developer", "remove"]).optional(),
+      /**
+       * Force treating this model as a reasoning model.
+       *
+       * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+       * where the model ID is not recognized by the SDK's allowlist.
+       *
+       * When enabled, the SDK applies reasoning-model parameter compatibility rules
+       * and defaults `systemMessageMode` to `developer` unless overridden.
+       */
+      forceReasoning: import_v43.z.boolean().optional()
     })
   )
 );
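
Per the schema above, both new options ride along as per-call provider options under the `openai` key. A minimal sketch of setting `systemMessageMode` on a chat call (model ID and prompt are illustrative; behavior follows the JSDoc in this diff):

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { text } = await generateText({
  model: openai.chat('gpt-4o'),
  system: 'You are a terse assistant.',
  prompt: 'What is 2 + 2?',
  providerOptions: {
    openai: {
      // 'system' | 'developer' | 'remove'; when omitted, the SDK picks
      // the mode from the model ID ('developer' for reasoning models).
      systemMessageMode: 'developer',
    },
  },
});
```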
@@ -651,7 +670,7 @@ var OpenAIChatLanguageModel = class {
     toolChoice,
     providerOptions
   }) {
-    var _a, _b, _c;
+    var _a, _b, _c, _d, _e;
     const warnings = [];
     const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
       provider: "openai",
@@ -659,17 +678,18 @@ var OpenAIChatLanguageModel = class {
       schema: openaiChatLanguageModelOptions
     })) != null ? _a : {};
     const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
+    const isReasoningModel = (_b = openaiOptions.forceReasoning) != null ? _b : modelCapabilities.isReasoningModel;
     if (topK != null) {
       warnings.push({ type: "unsupported", feature: "topK" });
     }
     const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
       {
         prompt,
-        systemMessageMode: modelCapabilities.systemMessageMode
+        systemMessageMode: (_c = openaiOptions.systemMessageMode) != null ? _c : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode
       }
     );
     warnings.push(...messageWarnings);
-    const strictJsonSchema = (
+    const strictJsonSchema = (_d = openaiOptions.strictJsonSchema) != null ? _d : true;
     const baseArgs = {
       // model id:
       model: this.modelId,
@@ -690,7 +710,7 @@ var OpenAIChatLanguageModel = class {
       json_schema: {
         schema: responseFormat.schema,
         strict: strictJsonSchema,
-        name: (
+        name: (_e = responseFormat.name) != null ? _e : "response",
         description: responseFormat.description
       }
     } : { type: "json_object" } : void 0,
@@ -711,7 +731,7 @@ var OpenAIChatLanguageModel = class {
       // messages:
       messages
     };
-    if (
+    if (isReasoningModel) {
       if (openaiOptions.reasoningEffort !== "none" || !modelCapabilities.supportsNonReasoningParameters) {
         if (baseArgs.temperature != null) {
           baseArgs.temperature = void 0;
@@ -1731,6 +1751,8 @@ var OpenAIImageModel = class {
   }
   async doGenerate({
     prompt,
+    files,
+    mask,
     n,
     size,
     aspectRatio,
@@ -1739,7 +1761,7 @@ var OpenAIImageModel = class {
     headers,
     abortSignal
   }) {
-    var _a, _b, _c, _d, _e, _f, _g;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
     const warnings = [];
     if (aspectRatio != null) {
       warnings.push({
@@ -1752,6 +1774,72 @@ var OpenAIImageModel = class {
       warnings.push({ type: "unsupported", feature: "seed" });
     }
     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+    if (files != null) {
+      const { value: response2, responseHeaders: responseHeaders2 } = await (0, import_provider_utils13.postFormDataToApi)({
+        url: this.config.url({
+          path: "/images/edits",
+          modelId: this.modelId
+        }),
+        headers: (0, import_provider_utils13.combineHeaders)(this.config.headers(), headers),
+        formData: (0, import_provider_utils13.convertToFormData)({
+          model: this.modelId,
+          prompt,
+          image: await Promise.all(
+            files.map(
+              (file) => file.type === "file" ? new Blob(
+                [
+                  file.data instanceof Uint8Array ? new Blob([file.data], {
+                    type: file.mediaType
+                  }) : new Blob([(0, import_provider_utils13.convertBase64ToUint8Array)(file.data)], {
+                    type: file.mediaType
+                  })
+                ],
+                { type: file.mediaType }
+              ) : (0, import_provider_utils13.downloadBlob)(file.url)
+            )
+          ),
+          mask: mask != null ? await fileToBlob(mask) : void 0,
+          n,
+          size,
+          ...(_d = providerOptions.openai) != null ? _d : {}
+        }),
+        failedResponseHandler: openaiFailedResponseHandler,
+        successfulResponseHandler: (0, import_provider_utils13.createJsonResponseHandler)(
+          openaiImageResponseSchema
+        ),
+        abortSignal,
+        fetch: this.config.fetch
+      });
+      return {
+        images: response2.data.map((item) => item.b64_json),
+        warnings,
+        usage: response2.usage != null ? {
+          inputTokens: (_e = response2.usage.input_tokens) != null ? _e : void 0,
+          outputTokens: (_f = response2.usage.output_tokens) != null ? _f : void 0,
+          totalTokens: (_g = response2.usage.total_tokens) != null ? _g : void 0
+        } : void 0,
+        response: {
+          timestamp: currentDate,
+          modelId: this.modelId,
+          headers: responseHeaders2
+        },
+        providerMetadata: {
+          openai: {
+            images: response2.data.map((item) => {
+              var _a2, _b2, _c2, _d2, _e2;
+              return {
+                ...item.revised_prompt ? { revisedPrompt: item.revised_prompt } : {},
+                created: (_a2 = response2.created) != null ? _a2 : void 0,
+                size: (_b2 = response2.size) != null ? _b2 : void 0,
+                quality: (_c2 = response2.quality) != null ? _c2 : void 0,
+                background: (_d2 = response2.background) != null ? _d2 : void 0,
+                outputFormat: (_e2 = response2.output_format) != null ? _e2 : void 0
+              };
+            })
+          }
+        }
+      };
+    }
     const { value: response, responseHeaders } = await (0, import_provider_utils13.postJsonToApi)({
       url: this.config.url({
         path: "/images/generations",
@@ -1763,7 +1851,7 @@ var OpenAIImageModel = class {
         prompt,
         n,
         size,
-        ...(
+        ...(_h = providerOptions.openai) != null ? _h : {},
         ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
       },
       failedResponseHandler: openaiFailedResponseHandler,
@@ -1777,9 +1865,9 @@ var OpenAIImageModel = class {
       images: response.data.map((item) => item.b64_json),
       warnings,
       usage: response.usage != null ? {
-        inputTokens: (
-        outputTokens: (
-        totalTokens: (
+        inputTokens: (_i = response.usage.input_tokens) != null ? _i : void 0,
+        outputTokens: (_j = response.usage.output_tokens) != null ? _j : void 0,
+        totalTokens: (_k = response.usage.total_tokens) != null ? _k : void 0
       } : void 0,
       response: {
         timestamp: currentDate,
@@ -1804,6 +1892,14 @@ var OpenAIImageModel = class {
     };
   }
 };
+async function fileToBlob(file) {
+  if (!file) return void 0;
+  if (file.type === "url") {
+    return (0, import_provider_utils13.downloadBlob)(file.url);
+  }
+  const data = file.data instanceof Uint8Array ? file.data : (0, import_provider_utils13.convertBase64ToUint8Array)(file.data);
+  return new Blob([data], { type: file.mediaType });
+}

 // src/tool/apply-patch.ts
 var import_provider_utils14 = require("@ai-sdk/provider-utils");
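
When `files` is present, `doGenerate` now posts a multipart form to `/images/edits` instead of JSON to `/images/generations`, converting byte, base64, and URL inputs to Blobs (the new `fileToBlob` helper handles the optional mask). A hedged sketch of a direct `doGenerate` call using the parameter shapes visible in this diff; how the host `ai` package surfaces `files`/`mask` is not shown here, so this calls the model object directly:

```ts
import { readFile } from 'node:fs/promises';
import { openai } from '@ai-sdk/openai';

// Model ID and file names are illustrative.
const model = openai.imageModel('gpt-image-1');

const result = await model.doGenerate({
  prompt: 'Replace the sky with a dramatic sunset',
  // New in this release: input images route the request to /images/edits.
  files: [
    { type: 'file', data: await readFile('photo.png'), mediaType: 'image/png' },
    // URL entries are downloaded to Blobs internally:
    // { type: 'url', url: 'https://example.com/photo.png' },
  ],
  // Optional mask, converted via the new fileToBlob helper:
  mask: { type: 'file', data: await readFile('mask.png'), mediaType: 'image/png' },
  n: 1,
  size: '1024x1024',
  aspectRatio: undefined,
  seed: undefined,
  providerOptions: { openai: {} },
});
```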
@@ -3613,7 +3709,26 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils25.lazySchem
        * Defaults to `undefined`.
        * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
        */
-      user: import_v420.z.string().nullish()
+      user: import_v420.z.string().nullish(),
+      /**
+       * Override the system message mode for this model.
+       * - 'system': Use the 'system' role for system messages (default for most models)
+       * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+       * - 'remove': Remove system messages entirely
+       *
+       * If not specified, the mode is automatically determined based on the model.
+       */
+      systemMessageMode: import_v420.z.enum(["system", "developer", "remove"]).optional(),
+      /**
+       * Force treating this model as a reasoning model.
+       *
+       * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+       * where the model ID is not recognized by the SDK's allowlist.
+       *
+       * When enabled, the SDK applies reasoning-model parameter compatibility rules
+       * and defaults `systemMessageMode` to `developer` unless overridden.
+       */
+      forceReasoning: import_v420.z.boolean().optional()
     })
   )
 );
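
The Responses path gains the same pair of options, which is where the "stealth model" scenario from the JSDoc is most relevant: a reasoning model served through a custom baseURL under an ID the prefix allowlist cannot classify. A sketch (gateway URL and model ID are hypothetical):

```ts
import { createOpenAI } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Hypothetical gateway and model ID: the prefix allowlist cannot
// recognize 'stealth-reasoner-1' on its own.
const gateway = createOpenAI({ baseURL: 'https://gateway.example.com/v1' });

await generateText({
  model: gateway.responses('stealth-reasoner-1'),
  prompt: 'Prove that sqrt(2) is irrational.',
  providerOptions: {
    openai: {
      // Apply reasoning-model parameter rules and default system
      // messages to the 'developer' role, per the new option's JSDoc.
      forceReasoning: true,
      reasoningEffort: 'high',
    },
  },
});
```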
@@ -3824,7 +3939,7 @@ var OpenAIResponsesLanguageModel = class {
     toolChoice,
     responseFormat
   }) {
-    var _a, _b, _c, _d;
+    var _a, _b, _c, _d, _e, _f;
     const warnings = [];
     const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
     if (topK != null) {
@@ -3847,6 +3962,7 @@ var OpenAIResponsesLanguageModel = class {
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
+    const isReasoningModel = (_a = openaiOptions == null ? void 0 : openaiOptions.forceReasoning) != null ? _a : modelCapabilities.isReasoningModel;
     if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
       warnings.push({
         type: "unsupported",
@@ -3871,15 +3987,15 @@ var OpenAIResponsesLanguageModel = class {
     const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
       toolNameMapping,
-      systemMessageMode: modelCapabilities.systemMessageMode,
+      systemMessageMode: (_b = openaiOptions == null ? void 0 : openaiOptions.systemMessageMode) != null ? _b : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode,
       fileIdPrefixes: this.config.fileIdPrefixes,
-      store: (
+      store: (_c = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _c : true,
       hasLocalShellTool: hasOpenAITool("openai.local_shell"),
       hasShellTool: hasOpenAITool("openai.shell"),
       hasApplyPatchTool: hasOpenAITool("openai.apply_patch")
     });
     warnings.push(...inputWarnings);
-    const strictJsonSchema = (
+    const strictJsonSchema = (_d = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _d : true;
     let include = openaiOptions == null ? void 0 : openaiOptions.include;
     function addInclude(key) {
       if (include == null) {
@@ -3895,9 +4011,9 @@ var OpenAIResponsesLanguageModel = class {
     if (topLogprobs) {
       addInclude("message.output_text.logprobs");
     }
-    const webSearchToolName = (
+    const webSearchToolName = (_e = tools == null ? void 0 : tools.find(
       (tool) => tool.type === "provider" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
-    )) == null ? void 0 :
+    )) == null ? void 0 : _e.name;
     if (webSearchToolName) {
       addInclude("web_search_call.action.sources");
     }
@@ -3905,7 +4021,7 @@ var OpenAIResponsesLanguageModel = class {
       addInclude("code_interpreter_call.outputs");
     }
     const store = openaiOptions == null ? void 0 : openaiOptions.store;
-    if (store === false &&
+    if (store === false && isReasoningModel) {
       addInclude("reasoning.encrypted_content");
     }
     const baseArgs = {
@@ -3920,7 +4036,7 @@ var OpenAIResponsesLanguageModel = class {
       format: responseFormat.schema != null ? {
         type: "json_schema",
         strict: strictJsonSchema,
-        name: (
+        name: (_f = responseFormat.name) != null ? _f : "response",
         description: responseFormat.description,
         schema: responseFormat.schema
       } : { type: "json_object" }
@@ -3947,7 +4063,7 @@ var OpenAIResponsesLanguageModel = class {
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
       // model-specific settings:
-      ...
+      ...isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
        reasoning: {
          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
            effort: openaiOptions.reasoningEffort
@@ -3958,7 +4074,7 @@ var OpenAIResponsesLanguageModel = class {
         }
       }
     };
-    if (
+    if (isReasoningModel) {
       if (!((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) === "none" && modelCapabilities.supportsNonReasoningParameters)) {
         if (baseArgs.temperature != null) {
           baseArgs.temperature = void 0;
@@ -5495,7 +5611,7 @@ var OpenAITranscriptionModel = class {
 };

 // src/version.ts
-var VERSION = true ? "3.0.0-beta.
+var VERSION = true ? "3.0.0-beta.105" : "0.0.0-test";

 // src/openai-provider.ts
 function createOpenAI(options = {}) {