@ai-sdk/openai 3.0.0-beta.98 → 3.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +299 -0
- package/dist/index.d.mts +18 -17
- package/dist/index.d.ts +18 -17
- package/dist/index.js +342 -200
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +350 -204
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +12 -10
- package/dist/internal/index.d.ts +12 -10
- package/dist/internal/index.js +339 -199
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +347 -203
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +4 -4
package/dist/index.mjs
CHANGED
(In the reconstructed hunks below, a trailing … marks an old-version line that the diff viewer truncated at the first changed character; the elided text is not recoverable from this extract.)

@@ -43,8 +43,8 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
 function getOpenAILanguageModelCapabilities(modelId) {
   const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
   const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
-  const isReasoningModel = …
-  const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1");
+  const isReasoningModel = modelId.startsWith("o1") || modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("codex-mini") || modelId.startsWith("computer-use-preview") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
+  const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1") || modelId.startsWith("gpt-5.2");
   const systemMessageMode = isReasoningModel ? "developer" : "system";
   return {
     supportsFlexProcessing,
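The capability flags above are pure string-prefix checks on the model ID. A minimal re-implementation of just the flags this hunk touches (for illustration only; the real function is internal to the bundle) shows how `gpt-5.2` now passes `supportsNonReasoningParameters` while `gpt-5-chat` stays a non-reasoning model:

```ts
// Sketch of the prefix checks from the hunk above.
function capabilities(modelId: string) {
  const isReasoningModel =
    modelId.startsWith("o1") ||
    modelId.startsWith("o3") ||
    modelId.startsWith("o4-mini") ||
    modelId.startsWith("codex-mini") ||
    modelId.startsWith("computer-use-preview") ||
    (modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat"));
  const supportsNonReasoningParameters =
    modelId.startsWith("gpt-5.1") || modelId.startsWith("gpt-5.2");
  return { isReasoningModel, supportsNonReasoningParameters };
}

console.log(capabilities("gpt-5.2"));    // { isReasoningModel: true,  supportsNonReasoningParameters: true }
console.log(capabilities("gpt-5-chat")); // { isReasoningModel: false, supportsNonReasoningParameters: false }
console.log(capabilities("o4-mini"));    // { isReasoningModel: true,  supportsNonReasoningParameters: false }
```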
@@ -245,6 +245,9 @@ function convertToOpenAIChatMessages({
       }
       case "tool": {
         for (const toolResponse of content) {
+          if (toolResponse.type === "tool-approval-response") {
+            continue;
+          }
           const output = toolResponse.output;
           let contentValue;
           switch (output.type) {
@@ -304,7 +307,7 @@ function mapOpenAIFinishReason(finishReason) {
     case "tool_calls":
       return "tool-calls";
     default:
-      return "…
+      return "other";
   }
 }
 
@@ -545,7 +548,26 @@ var openaiChatLanguageModelOptions = lazySchema2(
        * username or email address, in order to avoid sending us any identifying
        * information.
        */
-      safetyIdentifier: z3.string().optional()
+      safetyIdentifier: z3.string().optional(),
+      /**
+       * Override the system message mode for this model.
+       * - 'system': Use the 'system' role for system messages (default for most models)
+       * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+       * - 'remove': Remove system messages entirely
+       *
+       * If not specified, the mode is automatically determined based on the model.
+       */
+      systemMessageMode: z3.enum(["system", "developer", "remove"]).optional(),
+      /**
+       * Force treating this model as a reasoning model.
+       *
+       * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+       * where the model ID is not recognized by the SDK's allowlist.
+       *
+       * When enabled, the SDK applies reasoning-model parameter compatibility rules
+       * and defaults `systemMessageMode` to `developer` unless overridden.
+       */
+      forceReasoning: z3.boolean().optional()
     })
   )
 );
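Both new chat options are reachable from user code through `providerOptions.openai`. A minimal sketch, assuming the AI SDK's `generateText`; the base URL and model ID are hypothetical, but the option names match the schema above:

```ts
import { generateText } from "ai";
import { createOpenAI } from "@ai-sdk/openai";

// Hypothetical proxy serving a reasoning model under an unrecognized ID.
const openai = createOpenAI({ baseURL: "https://my-proxy.example.com/v1" });

const result = await generateText({
  model: openai.chat("my-stealth-reasoning-model"), // not on the prefix allowlist
  prompt: "Summarize the latest changelog entry.",
  providerOptions: {
    openai: {
      forceReasoning: true,        // apply reasoning-model parameter rules
      systemMessageMode: "system", // but keep plain 'system' messages
    },
  },
});
```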
@@ -642,7 +664,7 @@ var OpenAIChatLanguageModel = class {
     toolChoice,
     providerOptions
   }) {
-    var _a, _b, _c;
+    var _a, _b, _c, _d, _e;
     const warnings = [];
     const openaiOptions = (_a = await parseProviderOptions({
       provider: "openai",
@@ -650,17 +672,18 @@ var OpenAIChatLanguageModel = class {
       schema: openaiChatLanguageModelOptions
     })) != null ? _a : {};
     const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
+    const isReasoningModel = (_b = openaiOptions.forceReasoning) != null ? _b : modelCapabilities.isReasoningModel;
     if (topK != null) {
       warnings.push({ type: "unsupported", feature: "topK" });
     }
     const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
       {
         prompt,
-        systemMessageMode: modelCapabilities.systemMessageMode
+        systemMessageMode: (_c = openaiOptions.systemMessageMode) != null ? _c : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode
       }
     );
     warnings.push(...messageWarnings);
-    const strictJsonSchema = (…
+    const strictJsonSchema = (_d = openaiOptions.strictJsonSchema) != null ? _d : true;
     const baseArgs = {
       // model id:
       model: this.modelId,
@@ -681,7 +704,7 @@ var OpenAIChatLanguageModel = class {
           json_schema: {
             schema: responseFormat.schema,
             strict: strictJsonSchema,
-            name: (…
+            name: (_e = responseFormat.name) != null ? _e : "response",
             description: responseFormat.description
           }
         } : { type: "json_object" } : void 0,
@@ -702,7 +725,7 @@ var OpenAIChatLanguageModel = class {
       // messages:
       messages
     };
-    if (…
+    if (isReasoningModel) {
       if (openaiOptions.reasoningEffort !== "none" || !modelCapabilities.supportsNonReasoningParameters) {
         if (baseArgs.temperature != null) {
           baseArgs.temperature = void 0;
@@ -808,7 +831,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f;
+    var _a, _b, _c, _d, _e, _f, _g;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -865,7 +888,10 @@ var OpenAIChatLanguageModel = class {
     }
     return {
       content,
-      finishReason: …
+      finishReason: {
+        unified: mapOpenAIFinishReason(choice.finish_reason),
+        raw: (_g = choice.finish_reason) != null ? _g : void 0
+      },
       usage: convertOpenAIChatUsage(response.usage),
       request: { body },
       response: {
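Across this release, `finishReason` changes from a plain string to an object carrying both the SDK's unified value and the provider's raw value. A sketch of the shape implied by the diff (the `unified` union is approximated from the mappings visible here plus the SDK's standard values; how higher-level helpers surface it is not shown):

```ts
// Shape implied by the diff: model implementations now return an object
// instead of a string for finishReason.
type FinishReason = {
  unified: "stop" | "length" | "tool-calls" | "content-filter" | "error" | "other";
  raw: string | undefined; // provider value, e.g. OpenAI's "tool_calls"
};

function describe(finishReason: FinishReason): string {
  return `finished: ${finishReason.unified}` +
    (finishReason.raw ? ` (raw: ${finishReason.raw})` : "");
}

console.log(describe({ unified: "tool-calls", raw: "tool_calls" }));
// finished: tool-calls (raw: tool_calls)
```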
@@ -901,7 +927,10 @@ var OpenAIChatLanguageModel = class {
       fetch: this.config.fetch
     });
     const toolCalls = [];
-    let finishReason = …
+    let finishReason = {
+      unified: "other",
+      raw: void 0
+    };
     let usage = void 0;
     let metadataExtracted = false;
     let isActiveText = false;
@@ -918,13 +947,13 @@ var OpenAIChatLanguageModel = class {
           controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
         }
         if (!chunk.success) {
-          finishReason = "error";
+          finishReason = { unified: "error", raw: void 0 };
           controller.enqueue({ type: "error", error: chunk.error });
           return;
         }
         const value = chunk.value;
         if ("error" in value) {
-          finishReason = "error";
+          finishReason = { unified: "error", raw: void 0 };
           controller.enqueue({ type: "error", error: value.error });
           return;
         }
@@ -949,7 +978,10 @@ var OpenAIChatLanguageModel = class {
         }
         const choice = value.choices[0];
         if ((choice == null ? void 0 : choice.finish_reason) != null) {
-          finishReason = …
+          finishReason = {
+            unified: mapOpenAIFinishReason(choice.finish_reason),
+            raw: choice.finish_reason
+          };
         }
         if (((_e = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _e.content) != null) {
           providerMetadata.openai.logprobs = choice.logprobs.content;
@@ -1238,7 +1270,7 @@ function mapOpenAIFinishReason2(finishReason) {
     case "tool_calls":
       return "tool-calls";
     default:
-      return "…
+      return "other";
   }
 }
 
@@ -1436,6 +1468,7 @@ var OpenAICompletionLanguageModel = class {
     };
   }
   async doGenerate(options) {
+    var _a;
     const { args, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -1463,7 +1496,10 @@ var OpenAICompletionLanguageModel = class {
     return {
       content: [{ type: "text", text: choice.text }],
       usage: convertOpenAICompletionUsage(response.usage),
-      finishReason: …
+      finishReason: {
+        unified: mapOpenAIFinishReason2(choice.finish_reason),
+        raw: (_a = choice.finish_reason) != null ? _a : void 0
+      },
       request: { body: args },
       response: {
         ...getResponseMetadata2(response),
@@ -1497,7 +1533,10 @@ var OpenAICompletionLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    let finishReason = …
+    let finishReason = {
+      unified: "other",
+      raw: void 0
+    };
     const providerMetadata = { openai: {} };
     let usage = void 0;
     let isFirstChunk = true;
@@ -1512,13 +1551,13 @@ var OpenAICompletionLanguageModel = class {
           controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
         }
         if (!chunk.success) {
-          finishReason = "error";
+          finishReason = { unified: "error", raw: void 0 };
           controller.enqueue({ type: "error", error: chunk.error });
           return;
         }
         const value = chunk.value;
         if ("error" in value) {
-          finishReason = "error";
+          finishReason = { unified: "error", raw: void 0 };
           controller.enqueue({ type: "error", error: value.error });
           return;
         }
@@ -1535,7 +1574,10 @@ var OpenAICompletionLanguageModel = class {
         }
         const choice = value.choices[0];
         if ((choice == null ? void 0 : choice.finish_reason) != null) {
-          finishReason = …
+          finishReason = {
+            unified: mapOpenAIFinishReason2(choice.finish_reason),
+            raw: choice.finish_reason
+          };
         }
         if ((choice == null ? void 0 : choice.logprobs) != null) {
           providerMetadata.openai.logprobs = choice.logprobs;
@@ -1678,7 +1720,11 @@ var OpenAIEmbeddingModel = class {
 // src/image/openai-image-model.ts
 import {
   combineHeaders as combineHeaders4,
+  convertBase64ToUint8Array,
+  convertToFormData,
   createJsonResponseHandler as createJsonResponseHandler4,
+  downloadBlob,
+  postFormDataToApi,
   postJsonToApi as postJsonToApi4
 } from "@ai-sdk/provider-utils";
 
@@ -1717,11 +1763,13 @@ var modelMaxImagesPerCall = {
   "dall-e-3": 1,
   "dall-e-2": 10,
   "gpt-image-1": 10,
-  "gpt-image-1-mini": 10
+  "gpt-image-1-mini": 10,
+  "gpt-image-1.5": 10
 };
 var hasDefaultResponseFormat = /* @__PURE__ */ new Set([
   "gpt-image-1",
-  "gpt-image-1-mini"
+  "gpt-image-1-mini",
+  "gpt-image-1.5"
 ]);
 
 // src/image/openai-image-model.ts
@@ -1740,6 +1788,8 @@ var OpenAIImageModel = class {
   }
   async doGenerate({
     prompt,
+    files,
+    mask,
     n,
     size,
     aspectRatio,
@@ -1748,7 +1798,7 @@ var OpenAIImageModel = class {
     headers,
     abortSignal
   }) {
-    var _a, _b, _c, _d, _e, _f, _g;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
     const warnings = [];
     if (aspectRatio != null) {
       warnings.push({
@@ -1761,6 +1811,72 @@ var OpenAIImageModel = class {
       warnings.push({ type: "unsupported", feature: "seed" });
     }
     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+    if (files != null) {
+      const { value: response2, responseHeaders: responseHeaders2 } = await postFormDataToApi({
+        url: this.config.url({
+          path: "/images/edits",
+          modelId: this.modelId
+        }),
+        headers: combineHeaders4(this.config.headers(), headers),
+        formData: convertToFormData({
+          model: this.modelId,
+          prompt,
+          image: await Promise.all(
+            files.map(
+              (file) => file.type === "file" ? new Blob(
+                [
+                  file.data instanceof Uint8Array ? new Blob([file.data], {
+                    type: file.mediaType
+                  }) : new Blob([convertBase64ToUint8Array(file.data)], {
+                    type: file.mediaType
+                  })
+                ],
+                { type: file.mediaType }
+              ) : downloadBlob(file.url)
+            )
+          ),
+          mask: mask != null ? await fileToBlob(mask) : void 0,
+          n,
+          size,
+          ...(_d = providerOptions.openai) != null ? _d : {}
+        }),
+        failedResponseHandler: openaiFailedResponseHandler,
+        successfulResponseHandler: createJsonResponseHandler4(
+          openaiImageResponseSchema
+        ),
+        abortSignal,
+        fetch: this.config.fetch
+      });
+      return {
+        images: response2.data.map((item) => item.b64_json),
+        warnings,
+        usage: response2.usage != null ? {
+          inputTokens: (_e = response2.usage.input_tokens) != null ? _e : void 0,
+          outputTokens: (_f = response2.usage.output_tokens) != null ? _f : void 0,
+          totalTokens: (_g = response2.usage.total_tokens) != null ? _g : void 0
+        } : void 0,
+        response: {
+          timestamp: currentDate,
+          modelId: this.modelId,
+          headers: responseHeaders2
+        },
+        providerMetadata: {
+          openai: {
+            images: response2.data.map((item) => {
+              var _a2, _b2, _c2, _d2, _e2;
+              return {
+                ...item.revised_prompt ? { revisedPrompt: item.revised_prompt } : {},
+                created: (_a2 = response2.created) != null ? _a2 : void 0,
+                size: (_b2 = response2.size) != null ? _b2 : void 0,
+                quality: (_c2 = response2.quality) != null ? _c2 : void 0,
+                background: (_d2 = response2.background) != null ? _d2 : void 0,
+                outputFormat: (_e2 = response2.output_format) != null ? _e2 : void 0
+              };
+            })
+          }
+        }
+      };
+    }
     const { value: response, responseHeaders } = await postJsonToApi4({
       url: this.config.url({
         path: "/images/generations",
@@ -1772,7 +1888,7 @@ var OpenAIImageModel = class {
         prompt,
         n,
         size,
-        ...(…
+        ...(_h = providerOptions.openai) != null ? _h : {},
         ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
       },
       failedResponseHandler: openaiFailedResponseHandler,
@@ -1786,9 +1902,9 @@ var OpenAIImageModel = class {
       images: response.data.map((item) => item.b64_json),
       warnings,
       usage: response.usage != null ? {
-        inputTokens: (…
-        outputTokens: (…
-        totalTokens: (…
+        inputTokens: (_i = response.usage.input_tokens) != null ? _i : void 0,
+        outputTokens: (_j = response.usage.output_tokens) != null ? _j : void 0,
+        totalTokens: (_k = response.usage.total_tokens) != null ? _k : void 0
       } : void 0,
       response: {
         timestamp: currentDate,
@@ -1813,6 +1929,14 @@ var OpenAIImageModel = class {
     };
   }
 };
+async function fileToBlob(file) {
+  if (!file) return void 0;
+  if (file.type === "url") {
+    return downloadBlob(file.url);
+  }
+  const data = file.data instanceof Uint8Array ? file.data : convertBase64ToUint8Array(file.data);
+  return new Blob([data], { type: file.mediaType });
+}
 
 // src/tool/apply-patch.ts
 import {
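With `files` set (and optionally `mask`), the image model now posts multipart form data to `/images/edits` instead of JSON to `/images/generations`. A low-level sketch of driving this at the model-interface level; the field list mirrors the `doGenerate({ prompt, files, mask, ... })` destructuring above, but the exact public typing may differ:

```ts
import { createOpenAI } from "@ai-sdk/openai";
import { readFile } from "node:fs/promises";

const openai = createOpenAI();
const model = openai.image("gpt-image-1.5"); // new entry in modelMaxImagesPerCall

const source = await readFile("photo.png");       // hypothetical input file
const maskData = await readFile("mask.png");      // hypothetical mask file

// Calling the provider model interface directly (a sketch, not the
// recommended high-level API; option names come from the diff above).
const { images } = await model.doGenerate({
  prompt: "Replace the sky with a sunset",
  files: [{ type: "file", data: new Uint8Array(source), mediaType: "image/png" }],
  mask: { type: "file", data: new Uint8Array(maskData), mediaType: "image/png" },
  n: 1,
  size: "1024x1024",
  aspectRatio: undefined,
  seed: undefined,
  providerOptions: {},
  headers: undefined,
  abortSignal: undefined,
});
// images[0] is a base64-encoded result (b64_json), per the diff.
```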
@@ -2210,16 +2334,14 @@ var mcpArgsSchema = lazySchema16(
     authorization: z17.string().optional(),
     connectorId: z17.string().optional(),
     headers: z17.record(z17.string(), z17.string()).optional(),
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    // ])
-    // .optional(),
+    requireApproval: z17.union([
+      z17.enum(["always", "never"]),
+      z17.object({
+        never: z17.object({
+          toolNames: z17.array(z17.string()).optional()
+        }).optional()
+      })
+    ]).optional(),
     serverDescription: z17.string().optional(),
     serverUrl: z17.string().optional()
   }).refine(
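The previously commented-out `requireApproval` argument is now live in the MCP tool schema. A sketch of the accepted shapes, passed wherever the provider's MCP tool args go (alongside `serverLabel`/`serverUrl`); the shapes follow the zod union above, while the tool names themselves are hypothetical:

```ts
// Accepted per the schema: "always" | "never" | { never?: { toolNames?: string[] } }
const approveEverythingUpFront = { requireApproval: "never" as const };

const skipApprovalForReadOnlyTools = {
  requireApproval: {
    never: {
      // these (hypothetical) tools run without an approval round-trip;
      // everything else still requires approval
      toolNames: ["search_docs", "read_file"],
    },
  },
};
```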
@@ -2231,36 +2353,14 @@ var mcpArgsSchema = lazySchema16(
 var mcpInputSchema = lazySchema16(() => zodSchema16(z17.object({})));
 var mcpOutputSchema = lazySchema16(
   () => zodSchema16(
-    z17.…
-    z17.…
-      …
-      …
-      …
-      …
-      …
-      …
-    }),
-    z17.object({
-      type: z17.literal("listTools"),
-      serverLabel: z17.string(),
-      tools: z17.array(
-        z17.object({
-          name: z17.string(),
-          description: z17.string().optional(),
-          inputSchema: jsonValueSchema,
-          annotations: z17.record(z17.string(), jsonValueSchema).optional()
-        })
-      ),
-      error: z17.union([z17.string(), jsonValueSchema]).optional()
-    }),
-    z17.object({
-      type: z17.literal("approvalRequest"),
-      serverLabel: z17.string(),
-      name: z17.string(),
-      arguments: z17.string(),
-      approvalRequestId: z17.string()
-    })
-    ])
+    z17.object({
+      type: z17.literal("call"),
+      serverLabel: z17.string(),
+      name: z17.string(),
+      arguments: z17.string(),
+      output: z17.string().nullable().optional(),
+      error: z17.union([z17.string(), jsonValueSchema]).optional()
+    })
   )
 );
 var mcpToolFactory = createProviderToolFactoryWithOutputSchema9({
@@ -2669,6 +2769,9 @@ async function convertToOpenAIResponsesInput({
       }
       case "tool": {
         for (const part of content) {
+          if (part.type === "tool-approval-response") {
+            continue;
+          }
           const output = part.output;
           const resolvedToolName = toolNameMapping.toProviderToolName(
             part.toolName
@@ -2683,7 +2786,7 @@ async function convertToOpenAIResponsesInput({
               call_id: part.toolCallId,
               output: parsedOutput.output
             });
-            …
+            continue;
           }
           if (hasShellTool && resolvedToolName === "shell" && output.type === "json") {
             const parsedOutput = await validateTypes({
@@ -2702,7 +2805,7 @@ async function convertToOpenAIResponsesInput({
                 }
               }))
             });
-            …
+            continue;
           }
           if (hasApplyPatchTool && part.toolName === "apply_patch" && output.type === "json") {
             const parsedOutput = await validateTypes({
@@ -2715,7 +2818,7 @@ async function convertToOpenAIResponsesInput({
               status: parsedOutput.status,
               output: parsedOutput.output
             });
-            …
+            continue;
           }
           let contentValue;
           switch (output.type) {
@@ -2796,7 +2899,7 @@ function mapOpenAIResponseFinishReason({
     case "content_filter":
       return "content-filter";
     default:
-      return hasFunctionCall ? "tool-calls" : "…
+      return hasFunctionCall ? "tool-calls" : "other";
   }
 }
 
@@ -3195,6 +3298,19 @@ var openaiResponsesChunkSchema = lazySchema17(
       item_id: z19.string(),
       summary_index: z19.number()
     }),
+    z19.object({
+      type: z19.literal("response.apply_patch_call_operation_diff.delta"),
+      item_id: z19.string(),
+      output_index: z19.number(),
+      delta: z19.string(),
+      obfuscation: z19.string().nullish()
+    }),
+    z19.object({
+      type: z19.literal("response.apply_patch_call_operation_diff.done"),
+      item_id: z19.string(),
+      output_index: z19.number(),
+      diff: z19.string()
+    }),
     z19.object({
       type: z19.literal("error"),
       sequence_number: z19.number(),
@@ -3675,7 +3791,26 @@ var openaiResponsesProviderOptionsSchema = lazySchema18(
        * Defaults to `undefined`.
        * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
        */
-      user: z20.string().nullish()
+      user: z20.string().nullish(),
+      /**
+       * Override the system message mode for this model.
+       * - 'system': Use the 'system' role for system messages (default for most models)
+       * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+       * - 'remove': Remove system messages entirely
+       *
+       * If not specified, the mode is automatically determined based on the model.
+       */
+      systemMessageMode: z20.enum(["system", "developer", "remove"]).optional(),
+      /**
+       * Force treating this model as a reasoning model.
+       *
+       * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+       * where the model ID is not recognized by the SDK's allowlist.
+       *
+       * When enabled, the SDK applies reasoning-model parameter compatibility rules
+       * and defaults `systemMessageMode` to `developer` unless overridden.
+       */
+      forceReasoning: z20.boolean().optional()
     })
   )
 );
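The Responses API gains the same two overrides. A sketch targeting the responses model, with the same caveats as the chat sketch earlier (hypothetical base URL and model ID); omitting `systemMessageMode` exercises the `developer` default for reasoning models:

```ts
import { generateText } from "ai";
import { createOpenAI } from "@ai-sdk/openai";

const openai = createOpenAI({ baseURL: "https://gateway.example.com/v1" }); // hypothetical

const result = await generateText({
  model: openai.responses("internal-reasoning-preview"), // unrecognized model ID
  prompt: "Draft a migration note for the 3.0.0 release.",
  providerOptions: {
    openai: {
      forceReasoning: true,
      // no systemMessageMode: reasoning models default to 'developer'
    },
  },
});
```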
@@ -3888,7 +4023,7 @@ var OpenAIResponsesLanguageModel = class {
     toolChoice,
     responseFormat
   }) {
-    var _a, _b, _c, _d;
+    var _a, _b, _c, _d, _e, _f;
     const warnings = [];
     const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
     if (topK != null) {
@@ -3911,6 +4046,7 @@ var OpenAIResponsesLanguageModel = class {
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
+    const isReasoningModel = (_a = openaiOptions == null ? void 0 : openaiOptions.forceReasoning) != null ? _a : modelCapabilities.isReasoningModel;
     if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
       warnings.push({
         type: "unsupported",
@@ -3935,15 +4071,15 @@ var OpenAIResponsesLanguageModel = class {
     const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
       toolNameMapping,
-      systemMessageMode: modelCapabilities.systemMessageMode,
+      systemMessageMode: (_b = openaiOptions == null ? void 0 : openaiOptions.systemMessageMode) != null ? _b : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode,
       fileIdPrefixes: this.config.fileIdPrefixes,
-      store: (…
+      store: (_c = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _c : true,
       hasLocalShellTool: hasOpenAITool("openai.local_shell"),
       hasShellTool: hasOpenAITool("openai.shell"),
       hasApplyPatchTool: hasOpenAITool("openai.apply_patch")
     });
     warnings.push(...inputWarnings);
-    const strictJsonSchema = (…
+    const strictJsonSchema = (_d = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _d : true;
     let include = openaiOptions == null ? void 0 : openaiOptions.include;
     function addInclude(key) {
       if (include == null) {
@@ -3959,9 +4095,9 @@ var OpenAIResponsesLanguageModel = class {
     if (topLogprobs) {
       addInclude("message.output_text.logprobs");
     }
-    const webSearchToolName = (…
+    const webSearchToolName = (_e = tools == null ? void 0 : tools.find(
       (tool) => tool.type === "provider" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
-    )) == null ? void 0 : …
+    )) == null ? void 0 : _e.name;
     if (webSearchToolName) {
       addInclude("web_search_call.action.sources");
     }
@@ -3969,7 +4105,7 @@ var OpenAIResponsesLanguageModel = class {
       addInclude("code_interpreter_call.outputs");
     }
     const store = openaiOptions == null ? void 0 : openaiOptions.store;
-    if (store === false && …
+    if (store === false && isReasoningModel) {
       addInclude("reasoning.encrypted_content");
     }
     const baseArgs = {
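Because `forceReasoning` feeds `isReasoningModel`, it also drives the `store === false` branch above: stateless requests to a reasoning model (recognized or forced) now automatically include `reasoning.encrypted_content`. A sketch, reusing the `openai` instance pattern from the earlier examples:

```ts
import { generateText } from "ai";
import { createOpenAI } from "@ai-sdk/openai";

const openai = createOpenAI();

const result = await generateText({
  model: openai.responses("o4-mini"), // recognized reasoning model
  prompt: "Why is the sky blue?",
  providerOptions: {
    openai: {
      store: false, // stateless: the response is not persisted server-side
      // per the branch above, the SDK adds "reasoning.encrypted_content"
      // to `include` for you (also when forceReasoning: true is set)
    },
  },
});
```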
@@ -3984,7 +4120,7 @@ var OpenAIResponsesLanguageModel = class {
         format: responseFormat.schema != null ? {
           type: "json_schema",
           strict: strictJsonSchema,
-          name: (…
+          name: (_f = responseFormat.name) != null ? _f : "response",
           description: responseFormat.description,
           schema: responseFormat.schema
         } : { type: "json_object" }
@@ -4011,7 +4147,7 @@ var OpenAIResponsesLanguageModel = class {
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
       // model-specific settings:
-      ...…
+      ...isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
           ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
             effort: openaiOptions.reasoningEffort
@@ -4022,7 +4158,7 @@ var OpenAIResponsesLanguageModel = class {
         }
       }
     };
-    if (…
+    if (isReasoningModel) {
       if (!((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) === "none" && modelCapabilities.supportsNonReasoningParameters)) {
         if (baseArgs.temperature != null) {
           baseArgs.temperature = void 0;
@@ -4094,7 +4230,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z;
     const {
       args: body,
       warnings,
@@ -4345,54 +4481,9 @@ var OpenAIResponsesLanguageModel = class {
           break;
         }
         case "mcp_list_tools": {
-          content.push({
-            type: "tool-call",
-            toolCallId: part.id,
-            toolName: toolNameMapping.toCustomToolName("mcp"),
-            input: JSON.stringify({}),
-            providerExecuted: true
-          });
-          content.push({
-            type: "tool-result",
-            toolCallId: part.id,
-            toolName: toolNameMapping.toCustomToolName("mcp"),
-            result: {
-              type: "listTools",
-              serverLabel: part.server_label,
-              tools: part.tools.map((t) => {
-                var _a2, _b2;
-                return {
-                  name: t.name,
-                  description: (_a2 = t.description) != null ? _a2 : void 0,
-                  inputSchema: t.input_schema,
-                  annotations: (_b2 = t.annotations) != null ? _b2 : void 0
-                };
-              }),
-              ...part.error != null ? { error: part.error } : {}
-            }
-          });
           break;
         }
         case "mcp_approval_request": {
-          content.push({
-            type: "tool-call",
-            toolCallId: part.id,
-            toolName: toolNameMapping.toCustomToolName("mcp"),
-            input: JSON.stringify({}),
-            providerExecuted: true
-          });
-          content.push({
-            type: "tool-result",
-            toolCallId: part.id,
-            toolName: toolNameMapping.toCustomToolName("mcp"),
-            result: {
-              type: "approvalRequest",
-              serverLabel: part.server_label,
-              name: part.name,
-              arguments: part.arguments,
-              approvalRequestId: part.approval_request_id
-            }
-          });
           break;
         }
         case "computer_call": {
@@ -4491,10 +4582,13 @@ var OpenAIResponsesLanguageModel = class {
     const usage = response.usage;
     return {
       content,
-      finishReason: …
-      …
-      …
-      …
+      finishReason: {
+        unified: mapOpenAIResponseFinishReason({
+          finishReason: (_x = response.incomplete_details) == null ? void 0 : _x.reason,
+          hasFunctionCall
+        }),
+        raw: (_z = (_y = response.incomplete_details) == null ? void 0 : _y.reason) != null ? _z : void 0
+      },
       usage: convertOpenAIResponsesUsage(usage),
       request: { body },
       response: {
@@ -4535,7 +4629,10 @@ var OpenAIResponsesLanguageModel = class {
     });
     const self = this;
     const providerKey = this.config.provider.replace(".responses", "");
-    let finishReason = …
+    let finishReason = {
+      unified: "other",
+      raw: void 0
+    };
     let usage = void 0;
     const logprobs = [];
     let responseId = null;
@@ -4551,12 +4648,12 @@ var OpenAIResponsesLanguageModel = class {
         controller.enqueue({ type: "stream-start", warnings });
       },
       transform(chunk, controller) {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A;
+        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C;
         if (options.includeRawChunks) {
           controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
         }
         if (!chunk.success) {
-          finishReason = "error";
+          finishReason = { unified: "error", raw: void 0 };
           controller.enqueue({ type: "error", error: chunk.error });
           return;
         }
@@ -4655,24 +4752,40 @@ var OpenAIResponsesLanguageModel = class {
             providerExecuted: true
           });
         } else if (value.item.type === "apply_patch_call") {
+          const { call_id: callId, operation } = value.item;
           ongoingToolCalls[value.output_index] = {
             toolName: toolNameMapping.toCustomToolName("apply_patch"),
-            toolCallId: …
+            toolCallId: callId,
+            applyPatch: {
+              // delete_file doesn't have diff
+              hasDiff: operation.type === "delete_file",
+              endEmitted: operation.type === "delete_file"
+            }
           };
-          …
+          controller.enqueue({
+            type: "tool-input-start",
+            id: callId,
+            toolName: toolNameMapping.toCustomToolName("apply_patch")
+          });
+          if (operation.type === "delete_file") {
+            const inputString = JSON.stringify({
+              callId,
+              operation
+            });
             controller.enqueue({
-              type: "tool-…
-              …
-              …
-              …
-              …
-              …
-              …
-              …
-              …
-              …
-              …
+              type: "tool-input-delta",
+              id: callId,
+              delta: inputString
+            });
+            controller.enqueue({
+              type: "tool-input-end",
+              id: callId
+            });
+          } else {
+            controller.enqueue({
+              type: "tool-input-delta",
+              id: callId,
+              delta: `{"callId":"${escapeJSONDelta(callId)}","operation":{"type":"${escapeJSONDelta(operation.type)}","path":"${escapeJSONDelta(operation.path)}","diff":"`
             });
           }
         } else if (value.item.type === "shell_call") {
@@ -4824,31 +4937,31 @@ var OpenAIResponsesLanguageModel = class {
           });
         } else if (value.item.type === "mcp_list_tools") {
           ongoingToolCalls[value.output_index] = void 0;
-          controller.enqueue({
-            type: "tool-result",
-            toolCallId: value.item.id,
-            toolName: toolNameMapping.toCustomToolName("mcp"),
-            result: {
-              type: "listTools",
-              serverLabel: value.item.server_label,
-              tools: value.item.tools.map((t) => {
-                var _a2, _b2;
-                return {
-                  name: t.name,
-                  description: (_a2 = t.description) != null ? _a2 : void 0,
-                  inputSchema: t.input_schema,
-                  annotations: (_b2 = t.annotations) != null ? _b2 : void 0
-                };
-              }),
-              ...value.item.error != null ? { error: value.item.error } : {}
-            }
-          });
         } else if (value.item.type === "apply_patch_call") {
-          ongoingToolCalls[value.output_index]…
-          if (value.item.…
+          const toolCall = ongoingToolCalls[value.output_index];
+          if ((toolCall == null ? void 0 : toolCall.applyPatch) && !toolCall.applyPatch.endEmitted && value.item.operation.type !== "delete_file") {
+            if (!toolCall.applyPatch.hasDiff) {
+              controller.enqueue({
+                type: "tool-input-delta",
+                id: toolCall.toolCallId,
+                delta: escapeJSONDelta(value.item.operation.diff)
+              });
+            }
+            controller.enqueue({
+              type: "tool-input-delta",
+              id: toolCall.toolCallId,
+              delta: '"}}'
+            });
+            controller.enqueue({
+              type: "tool-input-end",
+              id: toolCall.toolCallId
+            });
+            toolCall.applyPatch.endEmitted = true;
+          }
+          if (toolCall && value.item.status === "completed") {
             controller.enqueue({
               type: "tool-call",
-              toolCallId: …
+              toolCallId: toolCall.toolCallId,
               toolName: toolNameMapping.toCustomToolName("apply_patch"),
               input: JSON.stringify({
                 callId: value.item.call_id,
@@ -4861,20 +4974,9 @@ var OpenAIResponsesLanguageModel = class {
              })
             });
           }
+          ongoingToolCalls[value.output_index] = void 0;
         } else if (value.item.type === "mcp_approval_request") {
           ongoingToolCalls[value.output_index] = void 0;
-          controller.enqueue({
-            type: "tool-result",
-            toolCallId: value.item.id,
-            toolName: toolNameMapping.toCustomToolName("mcp"),
-            result: {
-              type: "approvalRequest",
-              serverLabel: value.item.server_label,
-              name: value.item.name,
-              arguments: value.item.arguments,
-              approvalRequestId: value.item.approval_request_id
-            }
-          });
         } else if (value.item.type === "local_shell_call") {
           ongoingToolCalls[value.output_index] = void 0;
           controller.enqueue({
@@ -4940,6 +5042,38 @@ var OpenAIResponsesLanguageModel = class {
              delta: value.delta
            });
          }
+        } else if (isResponseApplyPatchCallOperationDiffDeltaChunk(value)) {
+          const toolCall = ongoingToolCalls[value.output_index];
+          if (toolCall == null ? void 0 : toolCall.applyPatch) {
+            controller.enqueue({
+              type: "tool-input-delta",
+              id: toolCall.toolCallId,
+              delta: escapeJSONDelta(value.delta)
+            });
+            toolCall.applyPatch.hasDiff = true;
+          }
+        } else if (isResponseApplyPatchCallOperationDiffDoneChunk(value)) {
+          const toolCall = ongoingToolCalls[value.output_index];
+          if ((toolCall == null ? void 0 : toolCall.applyPatch) && !toolCall.applyPatch.endEmitted) {
+            if (!toolCall.applyPatch.hasDiff) {
+              controller.enqueue({
+                type: "tool-input-delta",
+                id: toolCall.toolCallId,
+                delta: escapeJSONDelta(value.diff)
+              });
+              toolCall.applyPatch.hasDiff = true;
+            }
+            controller.enqueue({
+              type: "tool-input-delta",
+              id: toolCall.toolCallId,
+              delta: '"}}'
+            });
+            controller.enqueue({
+              type: "tool-input-end",
+              id: toolCall.toolCallId
+            });
+            toolCall.applyPatch.endEmitted = true;
+          }
         } else if (isResponseImageGenerationCallPartialImageChunk(value)) {
           controller.enqueue({
             type: "tool-result",
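The apply_patch streaming protocol above hand-builds one JSON object across several `tool-input-delta` parts: an opening fragment carrying `callId`, `operation.type`, and `path`, then escaped diff deltas, then a closing `'"}}'`. Concatenated, the deltas parse to the same shape the final `tool-call` input carries. A sketch of reassembling them on the consumer side (it assumes the parts shown belong to a single tool call; real streams interleave parts by `id`):

```ts
// Collect tool-input-delta parts for one tool call, then parse at end.
const deltas: string[] = [];

function onStreamPart(part: { type: string; id?: string; delta?: string }) {
  if (part.type === "tool-input-delta" && part.delta != null) {
    deltas.push(part.delta);
  } else if (part.type === "tool-input-end") {
    const input = JSON.parse(deltas.join(""));
    // e.g. { callId: "...", operation: { type: "update_file", path: "src/a.ts", diff: "..." } }
    console.log(input.operation.path);
  }
}
```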
@@ -4956,9 +5090,7 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({
             type: "tool-input-delta",
             id: toolCall.toolCallId,
-            …
-            // To escape it, we use JSON.stringify and slice to remove the outer quotes.
-            delta: JSON.stringify(value.delta).slice(1, -1)
+            delta: escapeJSONDelta(value.delta)
           });
         }
       } else if (isResponseCodeInterpreterCallCodeDoneChunk(value)) {
@@ -5055,10 +5187,13 @@ var OpenAIResponsesLanguageModel = class {
             activeReasoning[value.item_id].summaryParts[value.summary_index] = "can-conclude";
           }
         } else if (isResponseFinishedChunk(value)) {
-          finishReason = …
-          …
-          …
-          …
+          finishReason = {
+            unified: mapOpenAIResponseFinishReason({
+              finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
+              hasFunctionCall
+            }),
+            raw: (_k = (_j = value.response.incomplete_details) == null ? void 0 : _j.reason) != null ? _k : void 0
+          };
           usage = value.response.usage;
           if (typeof value.response.service_tier === "string") {
             serviceTier = value.response.service_tier;
@@ -5069,7 +5204,7 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({
             type: "source",
             sourceType: "url",
-            id: (…
+            id: (_n = (_m = (_l = self.config).generateId) == null ? void 0 : _m.call(_l)) != null ? _n : generateId2(),
             url: value.annotation.url,
             title: value.annotation.title
           });
@@ -5077,10 +5212,10 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({
             type: "source",
             sourceType: "document",
-            id: (…
+            id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : generateId2(),
             mediaType: "text/plain",
-            title: (…
-            filename: (…
+            title: (_s = (_r = value.annotation.quote) != null ? _r : value.annotation.filename) != null ? _s : "Document",
+            filename: (_t = value.annotation.filename) != null ? _t : value.annotation.file_id,
             ...value.annotation.file_id ? {
               providerMetadata: {
                 [providerKey]: {
@@ -5093,10 +5228,10 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({
             type: "source",
             sourceType: "document",
-            id: (…
+            id: (_w = (_v = (_u = self.config).generateId) == null ? void 0 : _v.call(_u)) != null ? _w : generateId2(),
             mediaType: "text/plain",
-            title: (…
-            filename: (…
+            title: (_y = (_x = value.annotation.filename) != null ? _x : value.annotation.file_id) != null ? _y : "Document",
+            filename: (_z = value.annotation.filename) != null ? _z : value.annotation.file_id,
             providerMetadata: {
               [providerKey]: {
                 fileId: value.annotation.file_id,
@@ -5109,7 +5244,7 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({
             type: "source",
             sourceType: "document",
-            id: (…
+            id: (_C = (_B = (_A = self.config).generateId) == null ? void 0 : _B.call(_A)) != null ? _C : generateId2(),
             mediaType: "application/octet-stream",
             title: value.annotation.file_id,
             filename: value.annotation.file_id,
@@ -5175,6 +5310,12 @@ function isResponseCodeInterpreterCallCodeDeltaChunk(chunk) {
 function isResponseCodeInterpreterCallCodeDoneChunk(chunk) {
   return chunk.type === "response.code_interpreter_call_code.done";
 }
+function isResponseApplyPatchCallOperationDiffDeltaChunk(chunk) {
+  return chunk.type === "response.apply_patch_call_operation_diff.delta";
+}
+function isResponseApplyPatchCallOperationDiffDoneChunk(chunk) {
+  return chunk.type === "response.apply_patch_call_operation_diff.done";
+}
 function isResponseOutputItemAddedChunk(chunk) {
   return chunk.type === "response.output_item.added";
 }
@@ -5205,6 +5346,9 @@ function mapWebSearchOutput(action) {
     };
   }
 }
+function escapeJSONDelta(delta) {
+  return JSON.stringify(delta).slice(1, -1);
+}
 
 // src/speech/openai-speech-model.ts
 import {
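The new `escapeJSONDelta` helper centralizes the stringify-and-slice trick that was previously inlined: it escapes a string for embedding inside an already-open JSON string literal. For example:

```ts
function escapeJSONDelta(delta: string): string {
  return JSON.stringify(delta).slice(1, -1); // drop the surrounding quotes
}

escapeJSONDelta("line1\nline2"); // => 'line1\\nline2' (backslash + n, not a newline)
escapeJSONDelta('say "hi"');     // => 'say \\"hi\\"'
// Safe to splice into a partially built JSON string such as ...,"diff":"<here>
```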
@@ -5330,11 +5474,11 @@ var OpenAISpeechModel = class {
 // src/transcription/openai-transcription-model.ts
 import {
   combineHeaders as combineHeaders7,
-  convertBase64ToUint8Array,
+  convertBase64ToUint8Array as convertBase64ToUint8Array2,
   createJsonResponseHandler as createJsonResponseHandler6,
   mediaTypeToExtension,
   parseProviderOptions as parseProviderOptions7,
-  postFormDataToApi
+  postFormDataToApi as postFormDataToApi2
 } from "@ai-sdk/provider-utils";
 
 // src/transcription/openai-transcription-api.ts
@@ -5484,7 +5628,7 @@ var OpenAITranscriptionModel = class {
       schema: openAITranscriptionProviderOptions
     });
     const formData = new FormData();
-    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([…
+    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array2(audio)]);
     formData.append("model", this.modelId);
     const fileExtension = mediaTypeToExtension(mediaType);
     formData.append(
@@ -5531,7 +5675,7 @@ var OpenAITranscriptionModel = class {
       value: response,
       responseHeaders,
       rawValue: rawResponse
-    } = await …
+    } = await postFormDataToApi2({
       url: this.config.url({
         path: "/audio/transcriptions",
         modelId: this.modelId
@@ -5571,7 +5715,7 @@ var OpenAITranscriptionModel = class {
 };
 
 // src/version.ts
-var VERSION = true ? "3.0.0…
+var VERSION = true ? "3.0.0" : "0.0.0-test";
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {
@@ -5659,6 +5803,8 @@ function createOpenAI(options = {}) {
   provider.responses = createResponsesModel;
   provider.embedding = createEmbeddingModel;
   provider.embeddingModel = createEmbeddingModel;
+  provider.textEmbedding = createEmbeddingModel;
+  provider.textEmbeddingModel = createEmbeddingModel;
   provider.image = createImageModel;
   provider.imageModel = createImageModel;
   provider.transcription = createTranscriptionModel;