@ai-sdk/openai 3.0.0-beta.98 → 3.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +299 -0
- package/dist/index.d.mts +18 -17
- package/dist/index.d.ts +18 -17
- package/dist/index.js +342 -200
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +350 -204
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +12 -10
- package/dist/internal/index.d.ts +12 -10
- package/dist/internal/index.js +339 -199
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +347 -203
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +4 -4
package/dist/internal/index.js
CHANGED
@@ -83,8 +83,8 @@ var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorRespo
 function getOpenAILanguageModelCapabilities(modelId) {
   const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
   const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
-  const isReasoningModel =
-  const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1");
+  const isReasoningModel = modelId.startsWith("o1") || modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("codex-mini") || modelId.startsWith("computer-use-preview") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
+  const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1") || modelId.startsWith("gpt-5.2");
   const systemMessageMode = isReasoningModel ? "developer" : "system";
   return {
     supportsFlexProcessing,
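
The gpt-5 family now counts as reasoning except the gpt-5-chat models. A quick sketch of how the new prefix checks resolve for a few sample IDs (logic copied from the `+` line above):

```ts
// How the 3.0.0 isReasoningModel check resolves for sample model IDs.
const isReasoningModel = (modelId: string) =>
  modelId.startsWith("o1") ||
  modelId.startsWith("o3") ||
  modelId.startsWith("o4-mini") ||
  modelId.startsWith("codex-mini") ||
  modelId.startsWith("computer-use-preview") ||
  (modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat"));

isReasoningModel("o3-mini");           // true
isReasoningModel("gpt-5.2");           // true
isReasoningModel("gpt-5-chat-latest"); // false
isReasoningModel("gpt-4o");            // false
```
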
@@ -283,6 +283,9 @@ function convertToOpenAIChatMessages({
       }
       case "tool": {
         for (const toolResponse of content) {
+          if (toolResponse.type === "tool-approval-response") {
+            continue;
+          }
           const output = toolResponse.output;
           let contentValue;
           switch (output.type) {
@@ -342,7 +345,7 @@ function mapOpenAIFinishReason(finishReason) {
     case "tool_calls":
       return "tool-calls";
     default:
-      return "
+      return "other";
   }
 }

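
This release changes `finishReason` from a bare string to an object carrying both the normalized value and the raw provider string; the pattern repeats in every model class below. A sketch of the new shape (taken from the diff; the full set of unified values beyond those visible here is assumed):

```ts
// New structured finish reason. Only "tool-calls", "content-filter",
// "error", and "other" are visible in this diff; other members assumed.
type OpenAIFinishReason = {
  unified: "tool-calls" | "content-filter" | "error" | "other" | (string & {});
  raw: string | undefined; // provider-reported value, e.g. "tool_calls"
};

declare const finishReason: OpenAIFinishReason;
console.log(`${finishReason.unified} (raw: ${finishReason.raw ?? "n/a"})`);
```
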
@@ -583,7 +586,26 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazySchema)(
        * username or email address, in order to avoid sending us any identifying
        * information.
        */
-      safetyIdentifier: import_v43.z.string().optional()
+      safetyIdentifier: import_v43.z.string().optional(),
+      /**
+       * Override the system message mode for this model.
+       * - 'system': Use the 'system' role for system messages (default for most models)
+       * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+       * - 'remove': Remove system messages entirely
+       *
+       * If not specified, the mode is automatically determined based on the model.
+       */
+      systemMessageMode: import_v43.z.enum(["system", "developer", "remove"]).optional(),
+      /**
+       * Force treating this model as a reasoning model.
+       *
+       * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+       * where the model ID is not recognized by the SDK's allowlist.
+       *
+       * When enabled, the SDK applies reasoning-model parameter compatibility rules
+       * and defaults `systemMessageMode` to `developer` unless overridden.
+       */
+      forceReasoning: import_v43.z.boolean().optional()
     })
   )
 );
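
A minimal usage sketch for the two new chat provider options (standard AI SDK call shape; the model ID is illustrative):

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const { text } = await generateText({
  model: openai("gpt-5.2"), // illustrative model ID
  system: "You are terse.",
  prompt: "One-line summary of this release?",
  providerOptions: {
    openai: {
      systemMessageMode: "developer", // "system" | "developer" | "remove"
      forceReasoning: true, // opt in when the model ID is not on the allowlist
    },
  },
});
```
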
@@ -678,7 +700,7 @@ var OpenAIChatLanguageModel = class {
     toolChoice,
     providerOptions
   }) {
-    var _a, _b, _c;
+    var _a, _b, _c, _d, _e;
     const warnings = [];
     const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
       provider: "openai",
@@ -686,17 +708,18 @@ var OpenAIChatLanguageModel = class {
       schema: openaiChatLanguageModelOptions
     })) != null ? _a : {};
     const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
+    const isReasoningModel = (_b = openaiOptions.forceReasoning) != null ? _b : modelCapabilities.isReasoningModel;
     if (topK != null) {
       warnings.push({ type: "unsupported", feature: "topK" });
     }
     const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
       {
         prompt,
-        systemMessageMode: modelCapabilities.systemMessageMode
+        systemMessageMode: (_c = openaiOptions.systemMessageMode) != null ? _c : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode
       }
     );
     warnings.push(...messageWarnings);
-    const strictJsonSchema = (
+    const strictJsonSchema = (_d = openaiOptions.strictJsonSchema) != null ? _d : true;
     const baseArgs = {
       // model id:
       model: this.modelId,
@@ -717,7 +740,7 @@ var OpenAIChatLanguageModel = class {
           json_schema: {
             schema: responseFormat.schema,
             strict: strictJsonSchema,
-            name: (
+            name: (_e = responseFormat.name) != null ? _e : "response",
             description: responseFormat.description
           }
         } : { type: "json_object" } : void 0,
@@ -738,7 +761,7 @@ var OpenAIChatLanguageModel = class {
       // messages:
       messages
     };
-    if (
+    if (isReasoningModel) {
       if (openaiOptions.reasoningEffort !== "none" || !modelCapabilities.supportsNonReasoningParameters) {
         if (baseArgs.temperature != null) {
           baseArgs.temperature = void 0;
@@ -844,7 +867,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f;
+    var _a, _b, _c, _d, _e, _f, _g;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -901,7 +924,10 @@ var OpenAIChatLanguageModel = class {
     }
     return {
       content,
-      finishReason:
+      finishReason: {
+        unified: mapOpenAIFinishReason(choice.finish_reason),
+        raw: (_g = choice.finish_reason) != null ? _g : void 0
+      },
       usage: convertOpenAIChatUsage(response.usage),
       request: { body },
       response: {
@@ -937,7 +963,10 @@ var OpenAIChatLanguageModel = class {
       fetch: this.config.fetch
     });
     const toolCalls = [];
-    let finishReason =
+    let finishReason = {
+      unified: "other",
+      raw: void 0
+    };
     let usage = void 0;
     let metadataExtracted = false;
     let isActiveText = false;
@@ -954,13 +983,13 @@ var OpenAIChatLanguageModel = class {
           controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
         }
         if (!chunk.success) {
-          finishReason = "error";
+          finishReason = { unified: "error", raw: void 0 };
           controller.enqueue({ type: "error", error: chunk.error });
           return;
         }
         const value = chunk.value;
         if ("error" in value) {
-          finishReason = "error";
+          finishReason = { unified: "error", raw: void 0 };
           controller.enqueue({ type: "error", error: value.error });
           return;
         }
@@ -985,7 +1014,10 @@ var OpenAIChatLanguageModel = class {
         }
         const choice = value.choices[0];
         if ((choice == null ? void 0 : choice.finish_reason) != null) {
-          finishReason =
+          finishReason = {
+            unified: mapOpenAIFinishReason(choice.finish_reason),
+            raw: choice.finish_reason
+          };
         }
         if (((_e = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _e.content) != null) {
           providerMetadata.openai.logprobs = choice.logprobs.content;
@@ -1265,7 +1297,7 @@ function mapOpenAIFinishReason2(finishReason) {
     case "tool_calls":
       return "tool-calls";
     default:
-      return "
+      return "other";
   }
 }

@@ -1463,6 +1495,7 @@ var OpenAICompletionLanguageModel = class {
     };
   }
   async doGenerate(options) {
+    var _a;
     const { args, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -1490,7 +1523,10 @@ var OpenAICompletionLanguageModel = class {
     return {
       content: [{ type: "text", text: choice.text }],
       usage: convertOpenAICompletionUsage(response.usage),
-      finishReason:
+      finishReason: {
+        unified: mapOpenAIFinishReason2(choice.finish_reason),
+        raw: (_a = choice.finish_reason) != null ? _a : void 0
+      },
       request: { body: args },
       response: {
         ...getResponseMetadata2(response),
@@ -1524,7 +1560,10 @@ var OpenAICompletionLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    let finishReason =
+    let finishReason = {
+      unified: "other",
+      raw: void 0
+    };
     const providerMetadata = { openai: {} };
     let usage = void 0;
     let isFirstChunk = true;
@@ -1539,13 +1578,13 @@ var OpenAICompletionLanguageModel = class {
           controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
         }
         if (!chunk.success) {
-          finishReason = "error";
+          finishReason = { unified: "error", raw: void 0 };
           controller.enqueue({ type: "error", error: chunk.error });
           return;
         }
         const value = chunk.value;
         if ("error" in value) {
-          finishReason = "error";
+          finishReason = { unified: "error", raw: void 0 };
           controller.enqueue({ type: "error", error: value.error });
           return;
         }
@@ -1562,7 +1601,10 @@ var OpenAICompletionLanguageModel = class {
         }
         const choice = value.choices[0];
         if ((choice == null ? void 0 : choice.finish_reason) != null) {
-          finishReason =
+          finishReason = {
+            unified: mapOpenAIFinishReason2(choice.finish_reason),
+            raw: choice.finish_reason
+          };
         }
         if ((choice == null ? void 0 : choice.logprobs) != null) {
           providerMetadata.openai.logprobs = choice.logprobs;
@@ -1733,11 +1775,13 @@ var modelMaxImagesPerCall = {
   "dall-e-3": 1,
   "dall-e-2": 10,
   "gpt-image-1": 10,
-  "gpt-image-1-mini": 10
+  "gpt-image-1-mini": 10,
+  "gpt-image-1.5": 10
 };
 var hasDefaultResponseFormat = /* @__PURE__ */ new Set([
   "gpt-image-1",
-  "gpt-image-1-mini"
+  "gpt-image-1-mini",
+  "gpt-image-1.5"
 ]);

 // src/image/openai-image-model.ts
@@ -1756,6 +1800,8 @@ var OpenAIImageModel = class {
   }
   async doGenerate({
     prompt,
+    files,
+    mask,
     n,
     size,
     aspectRatio,
@@ -1764,7 +1810,7 @@ var OpenAIImageModel = class {
     headers,
     abortSignal
   }) {
-    var _a, _b, _c, _d, _e, _f, _g;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
     const warnings = [];
     if (aspectRatio != null) {
       warnings.push({
@@ -1777,6 +1823,72 @@ var OpenAIImageModel = class {
       warnings.push({ type: "unsupported", feature: "seed" });
     }
     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+    if (files != null) {
+      const { value: response2, responseHeaders: responseHeaders2 } = await (0, import_provider_utils13.postFormDataToApi)({
+        url: this.config.url({
+          path: "/images/edits",
+          modelId: this.modelId
+        }),
+        headers: (0, import_provider_utils13.combineHeaders)(this.config.headers(), headers),
+        formData: (0, import_provider_utils13.convertToFormData)({
+          model: this.modelId,
+          prompt,
+          image: await Promise.all(
+            files.map(
+              (file) => file.type === "file" ? new Blob(
+                [
+                  file.data instanceof Uint8Array ? new Blob([file.data], {
+                    type: file.mediaType
+                  }) : new Blob([(0, import_provider_utils13.convertBase64ToUint8Array)(file.data)], {
+                    type: file.mediaType
+                  })
+                ],
+                { type: file.mediaType }
+              ) : (0, import_provider_utils13.downloadBlob)(file.url)
+            )
+          ),
+          mask: mask != null ? await fileToBlob(mask) : void 0,
+          n,
+          size,
+          ...(_d = providerOptions.openai) != null ? _d : {}
+        }),
+        failedResponseHandler: openaiFailedResponseHandler,
+        successfulResponseHandler: (0, import_provider_utils13.createJsonResponseHandler)(
+          openaiImageResponseSchema
+        ),
+        abortSignal,
+        fetch: this.config.fetch
+      });
+      return {
+        images: response2.data.map((item) => item.b64_json),
+        warnings,
+        usage: response2.usage != null ? {
+          inputTokens: (_e = response2.usage.input_tokens) != null ? _e : void 0,
+          outputTokens: (_f = response2.usage.output_tokens) != null ? _f : void 0,
+          totalTokens: (_g = response2.usage.total_tokens) != null ? _g : void 0
+        } : void 0,
+        response: {
+          timestamp: currentDate,
+          modelId: this.modelId,
+          headers: responseHeaders2
+        },
+        providerMetadata: {
+          openai: {
+            images: response2.data.map((item) => {
+              var _a2, _b2, _c2, _d2, _e2;
+              return {
+                ...item.revised_prompt ? { revisedPrompt: item.revised_prompt } : {},
+                created: (_a2 = response2.created) != null ? _a2 : void 0,
+                size: (_b2 = response2.size) != null ? _b2 : void 0,
+                quality: (_c2 = response2.quality) != null ? _c2 : void 0,
+                background: (_d2 = response2.background) != null ? _d2 : void 0,
+                outputFormat: (_e2 = response2.output_format) != null ? _e2 : void 0
+              };
+            })
+          }
+        }
+      };
+    }
     const { value: response, responseHeaders } = await (0, import_provider_utils13.postJsonToApi)({
       url: this.config.url({
         path: "/images/generations",
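
The new branch routes to POST /images/edits whenever `files` are provided, instead of /images/generations. A hedged sketch of driving it through the provider's image model interface (the `doGenerate` parameter names come from the diff; other call options are abbreviated, the model ID is illustrative, and higher-level APIs may wrap this differently):

```ts
import { openai } from "@ai-sdk/openai";

declare const sourcePng: Uint8Array; // image to edit
declare const maskPng: Uint8Array;   // optional transparency mask

// Low-level sketch against the image model's doGenerate, mirroring the
// new `files`/`mask` parameters added in the diff above.
const model = openai.image("gpt-image-1.5");
const { images } = await model.doGenerate({
  prompt: "Replace the sky with a sunset",
  files: [{ type: "file", mediaType: "image/png", data: sourcePng }],
  mask: { type: "file", mediaType: "image/png", data: maskPng },
  n: 1,
  size: "1024x1024",
  aspectRatio: undefined,
  seed: undefined,
  providerOptions: {},
  headers: undefined,
  abortSignal: undefined,
});
```
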
@@ -1788,7 +1900,7 @@ var OpenAIImageModel = class {
         prompt,
         n,
         size,
-        ...(
+        ...(_h = providerOptions.openai) != null ? _h : {},
         ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
       },
       failedResponseHandler: openaiFailedResponseHandler,
@@ -1802,9 +1914,9 @@ var OpenAIImageModel = class {
       images: response.data.map((item) => item.b64_json),
       warnings,
       usage: response.usage != null ? {
-        inputTokens: (
-        outputTokens: (
-        totalTokens: (
+        inputTokens: (_i = response.usage.input_tokens) != null ? _i : void 0,
+        outputTokens: (_j = response.usage.output_tokens) != null ? _j : void 0,
+        totalTokens: (_k = response.usage.total_tokens) != null ? _k : void 0
       } : void 0,
       response: {
         timestamp: currentDate,
@@ -1829,6 +1941,14 @@ var OpenAIImageModel = class {
     };
   }
 };
+async function fileToBlob(file) {
+  if (!file) return void 0;
+  if (file.type === "url") {
+    return (0, import_provider_utils13.downloadBlob)(file.url);
+  }
+  const data = file.data instanceof Uint8Array ? file.data : (0, import_provider_utils13.convertBase64ToUint8Array)(file.data);
+  return new Blob([data], { type: file.mediaType });
+}

 // src/transcription/openai-transcription-model.ts
 var import_provider_utils16 = require("@ai-sdk/provider-utils");
@@ -2572,6 +2692,9 @@ async function convertToOpenAIResponsesInput({
       }
       case "tool": {
         for (const part of content) {
+          if (part.type === "tool-approval-response") {
+            continue;
+          }
           const output = part.output;
           const resolvedToolName = toolNameMapping.toProviderToolName(
             part.toolName
@@ -2586,7 +2709,7 @@ async function convertToOpenAIResponsesInput({
               call_id: part.toolCallId,
               output: parsedOutput.output
             });
-
+            continue;
           }
           if (hasShellTool && resolvedToolName === "shell" && output.type === "json") {
             const parsedOutput = await (0, import_provider_utils22.validateTypes)({
@@ -2605,7 +2728,7 @@ async function convertToOpenAIResponsesInput({
              }
            }))
            });
-
+            continue;
           }
           if (hasApplyPatchTool && part.toolName === "apply_patch" && output.type === "json") {
             const parsedOutput = await (0, import_provider_utils22.validateTypes)({
@@ -2618,7 +2741,7 @@ async function convertToOpenAIResponsesInput({
               status: parsedOutput.status,
               output: parsedOutput.output
             });
-
+            continue;
           }
           let contentValue;
           switch (output.type) {
@@ -2699,7 +2822,7 @@ function mapOpenAIResponseFinishReason({
     case "content_filter":
       return "content-filter";
     default:
-      return hasFunctionCall ? "tool-calls" : "
+      return hasFunctionCall ? "tool-calls" : "other";
   }
 }

@@ -3098,6 +3221,19 @@ var openaiResponsesChunkSchema = (0, import_provider_utils23.lazySchema)(
       item_id: import_v416.z.string(),
       summary_index: import_v416.z.number()
     }),
+    import_v416.z.object({
+      type: import_v416.z.literal("response.apply_patch_call_operation_diff.delta"),
+      item_id: import_v416.z.string(),
+      output_index: import_v416.z.number(),
+      delta: import_v416.z.string(),
+      obfuscation: import_v416.z.string().nullish()
+    }),
+    import_v416.z.object({
+      type: import_v416.z.literal("response.apply_patch_call_operation_diff.done"),
+      item_id: import_v416.z.string(),
+      output_index: import_v416.z.number(),
+      diff: import_v416.z.string()
+    }),
     import_v416.z.object({
       type: import_v416.z.literal("error"),
       sequence_number: import_v416.z.number(),
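
These two schemas validate the new Responses streaming events for apply_patch diffs. Illustrative event payloads (values invented) that would satisfy them:

```ts
// Invented payloads matching the new chunk schemas.
const diffDelta = {
  type: "response.apply_patch_call_operation_diff.delta",
  item_id: "apc_123",
  output_index: 0,
  delta: "@@ -1 +1 @@\n-old\n+new\n",
  obfuscation: null,
};

const diffDone = {
  type: "response.apply_patch_call_operation_diff.done",
  item_id: "apc_123",
  output_index: 0,
  diff: "@@ -1 +1 @@\n-old\n+new\n",
};
```
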
@@ -3578,7 +3714,26 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils24.lazySchem
        * Defaults to `undefined`.
        * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
        */
-      user: import_v417.z.string().nullish()
+      user: import_v417.z.string().nullish(),
+      /**
+       * Override the system message mode for this model.
+       * - 'system': Use the 'system' role for system messages (default for most models)
+       * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+       * - 'remove': Remove system messages entirely
+       *
+       * If not specified, the mode is automatically determined based on the model.
+       */
+      systemMessageMode: import_v417.z.enum(["system", "developer", "remove"]).optional(),
+      /**
+       * Force treating this model as a reasoning model.
+       *
+       * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+       * where the model ID is not recognized by the SDK's allowlist.
+       *
+       * When enabled, the SDK applies reasoning-model parameter compatibility rules
+       * and defaults `systemMessageMode` to `developer` unless overridden.
+       */
+      forceReasoning: import_v417.z.boolean().optional()
     })
   )
 );
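
On the Responses path, `forceReasoning` is aimed at OpenAI-compatible endpoints whose model IDs the capability allowlist cannot recognize. A hedged sketch (gateway URL and model ID are placeholders):

```ts
import { createOpenAI } from "@ai-sdk/openai";
import { generateText } from "ai";

const gateway = createOpenAI({ baseURL: "https://gateway.example.com/v1" });

const { text } = await generateText({
  model: gateway.responses("stealth-reasoner-1"), // placeholder model ID
  prompt: "Outline the rollout plan.",
  providerOptions: {
    openai: {
      // Applies reasoning-model parameter compatibility rules and defaults
      // systemMessageMode to "developer" unless overridden.
      forceReasoning: true,
    },
  },
});
```
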
@@ -3742,16 +3897,14 @@ var mcpArgsSchema = (0, import_provider_utils28.lazySchema)(
     authorization: import_v421.z.string().optional(),
     connectorId: import_v421.z.string().optional(),
     headers: import_v421.z.record(import_v421.z.string(), import_v421.z.string()).optional(),
-
-
-
-
-
-
-
-
-    // ])
-    // .optional(),
+    requireApproval: import_v421.z.union([
+      import_v421.z.enum(["always", "never"]),
+      import_v421.z.object({
+        never: import_v421.z.object({
+          toolNames: import_v421.z.array(import_v421.z.string()).optional()
+        }).optional()
+      })
+    ]).optional(),
     serverDescription: import_v421.z.string().optional(),
     serverUrl: import_v421.z.string().optional()
   }).refine(
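
The previously commented-out `requireApproval` argument is now live on the MCP tool schema. A sketch of a configuration this schema would accept (whether the factory is exposed as `openai.tools.mcp` is an assumption; the server URL is a placeholder):

```ts
import { openai } from "@ai-sdk/openai";

// Assumes the MCP tool factory is exposed as openai.tools.mcp;
// argument names follow mcpArgsSchema above.
const mcp = openai.tools.mcp({
  serverLabel: "docs",
  serverUrl: "https://mcp.example.com", // placeholder
  requireApproval: {
    // Skip the approval flow for these (presumably read-only) tools only;
    // "always" or "never" are also accepted as plain strings.
    never: { toolNames: ["search", "read_page"] },
  },
});
```
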
@@ -3763,36 +3916,14 @@ var mcpArgsSchema = (0, import_provider_utils28.lazySchema)(
 var mcpInputSchema = (0, import_provider_utils28.lazySchema)(() => (0, import_provider_utils28.zodSchema)(import_v421.z.object({})));
 var mcpOutputSchema = (0, import_provider_utils28.lazySchema)(
   () => (0, import_provider_utils28.zodSchema)(
-    import_v421.z.
-    import_v421.z.
-
-
-
-
-
-
-      }),
-      import_v421.z.object({
-        type: import_v421.z.literal("listTools"),
-        serverLabel: import_v421.z.string(),
-        tools: import_v421.z.array(
-          import_v421.z.object({
-            name: import_v421.z.string(),
-            description: import_v421.z.string().optional(),
-            inputSchema: jsonValueSchema,
-            annotations: import_v421.z.record(import_v421.z.string(), jsonValueSchema).optional()
-          })
-        ),
-        error: import_v421.z.union([import_v421.z.string(), jsonValueSchema]).optional()
-      }),
-      import_v421.z.object({
-        type: import_v421.z.literal("approvalRequest"),
-        serverLabel: import_v421.z.string(),
-        name: import_v421.z.string(),
-        arguments: import_v421.z.string(),
-        approvalRequestId: import_v421.z.string()
-      })
-    ])
+    import_v421.z.object({
+      type: import_v421.z.literal("call"),
+      serverLabel: import_v421.z.string(),
+      name: import_v421.z.string(),
+      arguments: import_v421.z.string(),
+      output: import_v421.z.string().nullable().optional(),
+      error: import_v421.z.union([import_v421.z.string(), jsonValueSchema]).optional()
+    })
   )
 );
 var mcpToolFactory = (0, import_provider_utils28.createProviderToolFactoryWithOutputSchema)({
@@ -4105,7 +4236,7 @@ var OpenAIResponsesLanguageModel = class {
     toolChoice,
     responseFormat
   }) {
-    var _a, _b, _c, _d;
+    var _a, _b, _c, _d, _e, _f;
     const warnings = [];
     const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
     if (topK != null) {
@@ -4128,6 +4259,7 @@ var OpenAIResponsesLanguageModel = class {
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
+    const isReasoningModel = (_a = openaiOptions == null ? void 0 : openaiOptions.forceReasoning) != null ? _a : modelCapabilities.isReasoningModel;
     if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
       warnings.push({
         type: "unsupported",
@@ -4152,15 +4284,15 @@ var OpenAIResponsesLanguageModel = class {
     const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
       toolNameMapping,
-      systemMessageMode: modelCapabilities.systemMessageMode,
+      systemMessageMode: (_b = openaiOptions == null ? void 0 : openaiOptions.systemMessageMode) != null ? _b : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode,
       fileIdPrefixes: this.config.fileIdPrefixes,
-      store: (
+      store: (_c = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _c : true,
       hasLocalShellTool: hasOpenAITool("openai.local_shell"),
       hasShellTool: hasOpenAITool("openai.shell"),
       hasApplyPatchTool: hasOpenAITool("openai.apply_patch")
     });
     warnings.push(...inputWarnings);
-    const strictJsonSchema = (
+    const strictJsonSchema = (_d = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _d : true;
     let include = openaiOptions == null ? void 0 : openaiOptions.include;
     function addInclude(key) {
       if (include == null) {
@@ -4176,9 +4308,9 @@ var OpenAIResponsesLanguageModel = class {
     if (topLogprobs) {
       addInclude("message.output_text.logprobs");
     }
-    const webSearchToolName = (
+    const webSearchToolName = (_e = tools == null ? void 0 : tools.find(
       (tool) => tool.type === "provider" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
-    )) == null ? void 0 :
+    )) == null ? void 0 : _e.name;
     if (webSearchToolName) {
       addInclude("web_search_call.action.sources");
     }
@@ -4186,7 +4318,7 @@ var OpenAIResponsesLanguageModel = class {
       addInclude("code_interpreter_call.outputs");
     }
     const store = openaiOptions == null ? void 0 : openaiOptions.store;
-    if (store === false &&
+    if (store === false && isReasoningModel) {
       addInclude("reasoning.encrypted_content");
     }
     const baseArgs = {
@@ -4201,7 +4333,7 @@ var OpenAIResponsesLanguageModel = class {
         format: responseFormat.schema != null ? {
           type: "json_schema",
           strict: strictJsonSchema,
-          name: (
+          name: (_f = responseFormat.name) != null ? _f : "response",
           description: responseFormat.description,
           schema: responseFormat.schema
         } : { type: "json_object" }
@@ -4228,7 +4360,7 @@ var OpenAIResponsesLanguageModel = class {
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
       // model-specific settings:
-      ...
+      ...isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
        reasoning: {
          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
            effort: openaiOptions.reasoningEffort
@@ -4239,7 +4371,7 @@ var OpenAIResponsesLanguageModel = class {
        }
      }
    };
-    if (
+    if (isReasoningModel) {
      if (!((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) === "none" && modelCapabilities.supportsNonReasoningParameters)) {
        if (baseArgs.temperature != null) {
          baseArgs.temperature = void 0;
@@ -4311,7 +4443,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z;
     const {
       args: body,
       warnings,
@@ -4562,54 +4694,9 @@ var OpenAIResponsesLanguageModel = class {
           break;
         }
         case "mcp_list_tools": {
-          content.push({
-            type: "tool-call",
-            toolCallId: part.id,
-            toolName: toolNameMapping.toCustomToolName("mcp"),
-            input: JSON.stringify({}),
-            providerExecuted: true
-          });
-          content.push({
-            type: "tool-result",
-            toolCallId: part.id,
-            toolName: toolNameMapping.toCustomToolName("mcp"),
-            result: {
-              type: "listTools",
-              serverLabel: part.server_label,
-              tools: part.tools.map((t) => {
-                var _a2, _b2;
-                return {
-                  name: t.name,
-                  description: (_a2 = t.description) != null ? _a2 : void 0,
-                  inputSchema: t.input_schema,
-                  annotations: (_b2 = t.annotations) != null ? _b2 : void 0
-                };
-              }),
-              ...part.error != null ? { error: part.error } : {}
-            }
-          });
           break;
         }
         case "mcp_approval_request": {
-          content.push({
-            type: "tool-call",
-            toolCallId: part.id,
-            toolName: toolNameMapping.toCustomToolName("mcp"),
-            input: JSON.stringify({}),
-            providerExecuted: true
-          });
-          content.push({
-            type: "tool-result",
-            toolCallId: part.id,
-            toolName: toolNameMapping.toCustomToolName("mcp"),
-            result: {
-              type: "approvalRequest",
-              serverLabel: part.server_label,
-              name: part.name,
-              arguments: part.arguments,
-              approvalRequestId: part.approval_request_id
-            }
-          });
           break;
         }
         case "computer_call": {
@@ -4708,10 +4795,13 @@ var OpenAIResponsesLanguageModel = class {
     const usage = response.usage;
     return {
       content,
-      finishReason:
-
-
-
+      finishReason: {
+        unified: mapOpenAIResponseFinishReason({
+          finishReason: (_x = response.incomplete_details) == null ? void 0 : _x.reason,
+          hasFunctionCall
+        }),
+        raw: (_z = (_y = response.incomplete_details) == null ? void 0 : _y.reason) != null ? _z : void 0
+      },
       usage: convertOpenAIResponsesUsage(usage),
       request: { body },
       response: {
@@ -4752,7 +4842,10 @@ var OpenAIResponsesLanguageModel = class {
     });
     const self = this;
     const providerKey = this.config.provider.replace(".responses", "");
-    let finishReason =
+    let finishReason = {
+      unified: "other",
+      raw: void 0
+    };
     let usage = void 0;
     const logprobs = [];
     let responseId = null;
@@ -4768,12 +4861,12 @@ var OpenAIResponsesLanguageModel = class {
         controller.enqueue({ type: "stream-start", warnings });
       },
       transform(chunk, controller) {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A;
+        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C;
         if (options.includeRawChunks) {
           controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
         }
         if (!chunk.success) {
-          finishReason = "error";
+          finishReason = { unified: "error", raw: void 0 };
           controller.enqueue({ type: "error", error: chunk.error });
           return;
         }
@@ -4872,24 +4965,40 @@ var OpenAIResponsesLanguageModel = class {
               providerExecuted: true
             });
           } else if (value.item.type === "apply_patch_call") {
+            const { call_id: callId, operation } = value.item;
             ongoingToolCalls[value.output_index] = {
               toolName: toolNameMapping.toCustomToolName("apply_patch"),
-              toolCallId:
+              toolCallId: callId,
+              applyPatch: {
+                // delete_file doesn't have diff
+                hasDiff: operation.type === "delete_file",
+                endEmitted: operation.type === "delete_file"
+              }
             };
-
+            controller.enqueue({
+              type: "tool-input-start",
+              id: callId,
+              toolName: toolNameMapping.toCustomToolName("apply_patch")
+            });
+            if (operation.type === "delete_file") {
+              const inputString = JSON.stringify({
+                callId,
+                operation
+              });
              controller.enqueue({
-              type: "tool-
-
-
-
-
-
-
-
-
-
-
+                type: "tool-input-delta",
+                id: callId,
+                delta: inputString
+              });
+              controller.enqueue({
+                type: "tool-input-end",
+                id: callId
+              });
+            } else {
+              controller.enqueue({
+                type: "tool-input-delta",
+                id: callId,
+                delta: `{"callId":"${escapeJSONDelta(callId)}","operation":{"type":"${escapeJSONDelta(operation.type)}","path":"${escapeJSONDelta(operation.path)}","diff":"`
+              });
             }
           } else if (value.item.type === "shell_call") {
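
Note how this branch streams the apply_patch tool input as raw JSON text: an opening fragment with the callId/operation metadata, escaped diff chunks, then a closing `"}}`. Concatenating the deltas yields parseable JSON, e.g.:

```ts
// Illustrative reassembly of the streamed tool-input deltas for an
// update_file operation (values invented).
const deltas = [
  '{"callId":"call_1","operation":{"type":"update_file","path":"src/a.ts","diff":"',
  "@@ -1 +1 @@\\n-old\\n+new\\n", // escaped diff chunk(s) from the ...diff.delta events
  '"}}', // closing fragment emitted when the operation completes
];
const input = JSON.parse(deltas.join(""));
// input.operation.diff === "@@ -1 +1 @@\n-old\n+new\n"
```
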
@@ -5041,31 +5150,31 @@ var OpenAIResponsesLanguageModel = class {
             });
           } else if (value.item.type === "mcp_list_tools") {
             ongoingToolCalls[value.output_index] = void 0;
-            controller.enqueue({
-              type: "tool-result",
-              toolCallId: value.item.id,
-              toolName: toolNameMapping.toCustomToolName("mcp"),
-              result: {
-                type: "listTools",
-                serverLabel: value.item.server_label,
-                tools: value.item.tools.map((t) => {
-                  var _a2, _b2;
-                  return {
-                    name: t.name,
-                    description: (_a2 = t.description) != null ? _a2 : void 0,
-                    inputSchema: t.input_schema,
-                    annotations: (_b2 = t.annotations) != null ? _b2 : void 0
-                  };
-                }),
-                ...value.item.error != null ? { error: value.item.error } : {}
-              }
-            });
           } else if (value.item.type === "apply_patch_call") {
-            ongoingToolCalls[value.output_index]
-            if (value.item.
+            const toolCall = ongoingToolCalls[value.output_index];
+            if ((toolCall == null ? void 0 : toolCall.applyPatch) && !toolCall.applyPatch.endEmitted && value.item.operation.type !== "delete_file") {
+              if (!toolCall.applyPatch.hasDiff) {
+                controller.enqueue({
+                  type: "tool-input-delta",
+                  id: toolCall.toolCallId,
+                  delta: escapeJSONDelta(value.item.operation.diff)
+                });
+              }
+              controller.enqueue({
+                type: "tool-input-delta",
+                id: toolCall.toolCallId,
+                delta: '"}}'
+              });
+              controller.enqueue({
+                type: "tool-input-end",
+                id: toolCall.toolCallId
+              });
+              toolCall.applyPatch.endEmitted = true;
+            }
+            if (toolCall && value.item.status === "completed") {
               controller.enqueue({
                 type: "tool-call",
-                toolCallId:
+                toolCallId: toolCall.toolCallId,
                 toolName: toolNameMapping.toCustomToolName("apply_patch"),
                 input: JSON.stringify({
                   callId: value.item.call_id,
|
|
|
5078
5187
|
}
|
|
5079
5188
|
});
|
|
5080
5189
|
}
|
|
5190
|
+
ongoingToolCalls[value.output_index] = void 0;
|
|
5081
5191
|
} else if (value.item.type === "mcp_approval_request") {
|
|
5082
5192
|
ongoingToolCalls[value.output_index] = void 0;
|
|
5083
|
-
controller.enqueue({
|
|
5084
|
-
type: "tool-result",
|
|
5085
|
-
toolCallId: value.item.id,
|
|
5086
|
-
toolName: toolNameMapping.toCustomToolName("mcp"),
|
|
5087
|
-
result: {
|
|
5088
|
-
type: "approvalRequest",
|
|
5089
|
-
serverLabel: value.item.server_label,
|
|
5090
|
-
name: value.item.name,
|
|
5091
|
-
arguments: value.item.arguments,
|
|
5092
|
-
approvalRequestId: value.item.approval_request_id
|
|
5093
|
-
}
|
|
5094
|
-
});
|
|
5095
5193
|
} else if (value.item.type === "local_shell_call") {
|
|
5096
5194
|
ongoingToolCalls[value.output_index] = void 0;
|
|
5097
5195
|
controller.enqueue({
|
|
@@ -5157,6 +5255,38 @@ var OpenAIResponsesLanguageModel = class {
             delta: value.delta
           });
         }
+        } else if (isResponseApplyPatchCallOperationDiffDeltaChunk(value)) {
+          const toolCall = ongoingToolCalls[value.output_index];
+          if (toolCall == null ? void 0 : toolCall.applyPatch) {
+            controller.enqueue({
+              type: "tool-input-delta",
+              id: toolCall.toolCallId,
+              delta: escapeJSONDelta(value.delta)
+            });
+            toolCall.applyPatch.hasDiff = true;
+          }
+        } else if (isResponseApplyPatchCallOperationDiffDoneChunk(value)) {
+          const toolCall = ongoingToolCalls[value.output_index];
+          if ((toolCall == null ? void 0 : toolCall.applyPatch) && !toolCall.applyPatch.endEmitted) {
+            if (!toolCall.applyPatch.hasDiff) {
+              controller.enqueue({
+                type: "tool-input-delta",
+                id: toolCall.toolCallId,
+                delta: escapeJSONDelta(value.diff)
+              });
+              toolCall.applyPatch.hasDiff = true;
+            }
+            controller.enqueue({
+              type: "tool-input-delta",
+              id: toolCall.toolCallId,
+              delta: '"}}'
+            });
+            controller.enqueue({
+              type: "tool-input-end",
+              id: toolCall.toolCallId
+            });
+            toolCall.applyPatch.endEmitted = true;
+          }
         } else if (isResponseImageGenerationCallPartialImageChunk(value)) {
           controller.enqueue({
             type: "tool-result",
@@ -5173,9 +5303,7 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({
             type: "tool-input-delta",
             id: toolCall.toolCallId,
-
-            // To escape it, we use JSON.stringify and slice to remove the outer quotes.
-            delta: JSON.stringify(value.delta).slice(1, -1)
+            delta: escapeJSONDelta(value.delta)
           });
         }
       } else if (isResponseCodeInterpreterCallCodeDoneChunk(value)) {
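
The escaping trick previously inlined here (JSON.stringify plus slicing off the outer quotes) is now the shared `escapeJSONDelta` helper, defined near the end of this diff. What it does:

```ts
// JSON-escapes a fragment without the surrounding quotes, so fragments can
// be appended inside an in-progress JSON string value.
function escapeJSONDelta(delta: string): string {
  return JSON.stringify(delta).slice(1, -1);
}

escapeJSONDelta('a "quote" and\na newline');
// => 'a \\"quote\\" and\\na newline' (literal backslash escapes, no quotes)
```
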
@@ -5272,10 +5400,13 @@ var OpenAIResponsesLanguageModel = class {
           activeReasoning[value.item_id].summaryParts[value.summary_index] = "can-conclude";
         }
       } else if (isResponseFinishedChunk(value)) {
-        finishReason =
-
-
-
+        finishReason = {
+          unified: mapOpenAIResponseFinishReason({
+            finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
+            hasFunctionCall
+          }),
+          raw: (_k = (_j = value.response.incomplete_details) == null ? void 0 : _j.reason) != null ? _k : void 0
+        };
         usage = value.response.usage;
         if (typeof value.response.service_tier === "string") {
           serviceTier = value.response.service_tier;
@@ -5286,7 +5417,7 @@ var OpenAIResponsesLanguageModel = class {
         controller.enqueue({
           type: "source",
           sourceType: "url",
-          id: (
+          id: (_n = (_m = (_l = self.config).generateId) == null ? void 0 : _m.call(_l)) != null ? _n : (0, import_provider_utils32.generateId)(),
           url: value.annotation.url,
           title: value.annotation.title
         });
@@ -5294,10 +5425,10 @@ var OpenAIResponsesLanguageModel = class {
         controller.enqueue({
           type: "source",
           sourceType: "document",
-          id: (
+          id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : (0, import_provider_utils32.generateId)(),
           mediaType: "text/plain",
-          title: (
-          filename: (
+          title: (_s = (_r = value.annotation.quote) != null ? _r : value.annotation.filename) != null ? _s : "Document",
+          filename: (_t = value.annotation.filename) != null ? _t : value.annotation.file_id,
           ...value.annotation.file_id ? {
             providerMetadata: {
               [providerKey]: {
@@ -5310,10 +5441,10 @@ var OpenAIResponsesLanguageModel = class {
         controller.enqueue({
           type: "source",
           sourceType: "document",
-          id: (
+          id: (_w = (_v = (_u = self.config).generateId) == null ? void 0 : _v.call(_u)) != null ? _w : (0, import_provider_utils32.generateId)(),
           mediaType: "text/plain",
-          title: (
-          filename: (
+          title: (_y = (_x = value.annotation.filename) != null ? _x : value.annotation.file_id) != null ? _y : "Document",
+          filename: (_z = value.annotation.filename) != null ? _z : value.annotation.file_id,
           providerMetadata: {
             [providerKey]: {
               fileId: value.annotation.file_id,
@@ -5326,7 +5457,7 @@ var OpenAIResponsesLanguageModel = class {
         controller.enqueue({
           type: "source",
           sourceType: "document",
-          id: (
+          id: (_C = (_B = (_A = self.config).generateId) == null ? void 0 : _B.call(_A)) != null ? _C : (0, import_provider_utils32.generateId)(),
           mediaType: "application/octet-stream",
           title: value.annotation.file_id,
           filename: value.annotation.file_id,
@@ -5392,6 +5523,12 @@ function isResponseCodeInterpreterCallCodeDeltaChunk(chunk) {
 function isResponseCodeInterpreterCallCodeDoneChunk(chunk) {
   return chunk.type === "response.code_interpreter_call_code.done";
 }
+function isResponseApplyPatchCallOperationDiffDeltaChunk(chunk) {
+  return chunk.type === "response.apply_patch_call_operation_diff.delta";
+}
+function isResponseApplyPatchCallOperationDiffDoneChunk(chunk) {
+  return chunk.type === "response.apply_patch_call_operation_diff.done";
+}
 function isResponseOutputItemAddedChunk(chunk) {
   return chunk.type === "response.output_item.added";
 }
@@ -5422,6 +5559,9 @@ function mapWebSearchOutput(action) {
     };
   }
 }
+function escapeJSONDelta(delta) {
+  return JSON.stringify(delta).slice(1, -1);
+}
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   OpenAIChatLanguageModel,