@ai-sdk/openai 3.0.0-beta.99 → 3.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +292 -0
- package/dist/index.d.mts +18 -17
- package/dist/index.d.ts +18 -17
- package/dist/index.js +342 -200
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +350 -204
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +12 -10
- package/dist/internal/index.d.ts +12 -10
- package/dist/internal/index.js +339 -199
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +347 -203
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +4 -4
package/dist/index.js
CHANGED
@@ -56,8 +56,8 @@ var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorRespo
 function getOpenAILanguageModelCapabilities(modelId) {
   const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
   const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
-  const isReasoningModel =
-  const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1");
+  const isReasoningModel = modelId.startsWith("o1") || modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("codex-mini") || modelId.startsWith("computer-use-preview") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
+  const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1") || modelId.startsWith("gpt-5.2");
   const systemMessageMode = isReasoningModel ? "developer" : "system";
   return {
     supportsFlexProcessing,
@@ -256,6 +256,9 @@ function convertToOpenAIChatMessages({
     }
     case "tool": {
       for (const toolResponse of content) {
+        if (toolResponse.type === "tool-approval-response") {
+          continue;
+        }
         const output = toolResponse.output;
         let contentValue;
         switch (output.type) {
@@ -315,7 +318,7 @@ function mapOpenAIFinishReason(finishReason) {
     case "tool_calls":
       return "tool-calls";
     default:
-      return "
+      return "other";
  }
}
 
@@ -556,7 +559,26 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazySchema)(
       * username or email address, in order to avoid sending us any identifying
       * information.
       */
-      safetyIdentifier: import_v43.z.string().optional()
+      safetyIdentifier: import_v43.z.string().optional(),
+      /**
+       * Override the system message mode for this model.
+       * - 'system': Use the 'system' role for system messages (default for most models)
+       * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+       * - 'remove': Remove system messages entirely
+       *
+       * If not specified, the mode is automatically determined based on the model.
+       */
+      systemMessageMode: import_v43.z.enum(["system", "developer", "remove"]).optional(),
+      /**
+       * Force treating this model as a reasoning model.
+       *
+       * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+       * where the model ID is not recognized by the SDK's allowlist.
+       *
+       * When enabled, the SDK applies reasoning-model parameter compatibility rules
+       * and defaults `systemMessageMode` to `developer` unless overridden.
+       */
+      forceReasoning: import_v43.z.boolean().optional()
    })
  )
);
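Note: both new chat options are set as per-call provider options. A minimal usage sketch (the baseURL and model ID below are placeholders for an unrecognized reasoning model behind a custom gateway):

  import { createOpenAI } from '@ai-sdk/openai';
  import { generateText } from 'ai';

  const openai = createOpenAI({ baseURL: 'https://gateway.example.com/v1' }); // placeholder URL
  const result = await generateText({
    model: openai('stealth-reasoner'), // placeholder ID not on the SDK's allowlist
    prompt: 'Hello!',
    providerOptions: {
      openai: {
        forceReasoning: true, // apply reasoning-model parameter rules
        systemMessageMode: 'developer' // optional; forceReasoning already defaults to this
      }
    }
  });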
@@ -651,7 +673,7 @@ var OpenAIChatLanguageModel = class {
    toolChoice,
    providerOptions
  }) {
-    var _a, _b, _c;
+    var _a, _b, _c, _d, _e;
    const warnings = [];
    const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
      provider: "openai",
@@ -659,17 +681,18 @@ var OpenAIChatLanguageModel = class {
      schema: openaiChatLanguageModelOptions
    })) != null ? _a : {};
    const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
+    const isReasoningModel = (_b = openaiOptions.forceReasoning) != null ? _b : modelCapabilities.isReasoningModel;
    if (topK != null) {
      warnings.push({ type: "unsupported", feature: "topK" });
    }
    const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
      {
        prompt,
-        systemMessageMode: modelCapabilities.systemMessageMode
+        systemMessageMode: (_c = openaiOptions.systemMessageMode) != null ? _c : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode
      }
    );
    warnings.push(...messageWarnings);
-    const strictJsonSchema = (
+    const strictJsonSchema = (_d = openaiOptions.strictJsonSchema) != null ? _d : true;
    const baseArgs = {
      // model id:
      model: this.modelId,
@@ -690,7 +713,7 @@ var OpenAIChatLanguageModel = class {
        json_schema: {
          schema: responseFormat.schema,
          strict: strictJsonSchema,
-          name: (
+          name: (_e = responseFormat.name) != null ? _e : "response",
          description: responseFormat.description
        }
      } : { type: "json_object" } : void 0,
@@ -711,7 +734,7 @@ var OpenAIChatLanguageModel = class {
      // messages:
      messages
    };
-    if (
+    if (isReasoningModel) {
      if (openaiOptions.reasoningEffort !== "none" || !modelCapabilities.supportsNonReasoningParameters) {
        if (baseArgs.temperature != null) {
          baseArgs.temperature = void 0;
@@ -817,7 +840,7 @@ var OpenAIChatLanguageModel = class {
    };
  }
  async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f;
+    var _a, _b, _c, _d, _e, _f, _g;
    const { args: body, warnings } = await this.getArgs(options);
    const {
      responseHeaders,
@@ -874,7 +897,10 @@ var OpenAIChatLanguageModel = class {
    }
    return {
      content,
-      finishReason:
+      finishReason: {
+        unified: mapOpenAIFinishReason(choice.finish_reason),
+        raw: (_g = choice.finish_reason) != null ? _g : void 0
+      },
      usage: convertOpenAIChatUsage(response.usage),
      request: { body },
      response: {
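Note: `finishReason` changes from a plain string to an object pairing the SDK's unified value with the provider's verbatim `finish_reason`. A sketch of reading it from a `doGenerate` result (the unified values visible in this diff are 'tool-calls', 'content-filter', 'error', and the new 'other' fallback):

  const { finishReason } = await model.doGenerate(options);
  console.log(finishReason.unified); // e.g. 'tool-calls'
  console.log(finishReason.raw);     // e.g. 'tool_calls' (OpenAI's finish_reason, or undefined)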
@@ -910,7 +936,10 @@ var OpenAIChatLanguageModel = class {
      fetch: this.config.fetch
    });
    const toolCalls = [];
-    let finishReason =
+    let finishReason = {
+      unified: "other",
+      raw: void 0
+    };
    let usage = void 0;
    let metadataExtracted = false;
    let isActiveText = false;
@@ -927,13 +956,13 @@ var OpenAIChatLanguageModel = class {
            controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
          }
          if (!chunk.success) {
-            finishReason = "error";
+            finishReason = { unified: "error", raw: void 0 };
            controller.enqueue({ type: "error", error: chunk.error });
            return;
          }
          const value = chunk.value;
          if ("error" in value) {
-            finishReason = "error";
+            finishReason = { unified: "error", raw: void 0 };
            controller.enqueue({ type: "error", error: value.error });
            return;
          }
@@ -958,7 +987,10 @@ var OpenAIChatLanguageModel = class {
          }
          const choice = value.choices[0];
          if ((choice == null ? void 0 : choice.finish_reason) != null) {
-            finishReason =
+            finishReason = {
+              unified: mapOpenAIFinishReason(choice.finish_reason),
+              raw: choice.finish_reason
+            };
          }
          if (((_e = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _e.content) != null) {
            providerMetadata.openai.logprobs = choice.logprobs.content;
@@ -1238,7 +1270,7 @@ function mapOpenAIFinishReason2(finishReason) {
    case "tool_calls":
      return "tool-calls";
    default:
-      return "
+      return "other";
  }
}
 
@@ -1436,6 +1468,7 @@ var OpenAICompletionLanguageModel = class {
    };
  }
  async doGenerate(options) {
+    var _a;
    const { args, warnings } = await this.getArgs(options);
    const {
      responseHeaders,
@@ -1463,7 +1496,10 @@ var OpenAICompletionLanguageModel = class {
    return {
      content: [{ type: "text", text: choice.text }],
      usage: convertOpenAICompletionUsage(response.usage),
-      finishReason:
+      finishReason: {
+        unified: mapOpenAIFinishReason2(choice.finish_reason),
+        raw: (_a = choice.finish_reason) != null ? _a : void 0
+      },
      request: { body: args },
      response: {
        ...getResponseMetadata2(response),
@@ -1497,7 +1533,10 @@ var OpenAICompletionLanguageModel = class {
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
-    let finishReason =
+    let finishReason = {
+      unified: "other",
+      raw: void 0
+    };
    const providerMetadata = { openai: {} };
    let usage = void 0;
    let isFirstChunk = true;
@@ -1512,13 +1551,13 @@ var OpenAICompletionLanguageModel = class {
            controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
          }
          if (!chunk.success) {
-            finishReason = "error";
+            finishReason = { unified: "error", raw: void 0 };
            controller.enqueue({ type: "error", error: chunk.error });
            return;
          }
          const value = chunk.value;
          if ("error" in value) {
-            finishReason = "error";
+            finishReason = { unified: "error", raw: void 0 };
            controller.enqueue({ type: "error", error: value.error });
            return;
          }
@@ -1535,7 +1574,10 @@ var OpenAICompletionLanguageModel = class {
          }
          const choice = value.choices[0];
          if ((choice == null ? void 0 : choice.finish_reason) != null) {
-            finishReason =
+            finishReason = {
+              unified: mapOpenAIFinishReason2(choice.finish_reason),
+              raw: choice.finish_reason
+            };
          }
          if ((choice == null ? void 0 : choice.logprobs) != null) {
            providerMetadata.openai.logprobs = choice.logprobs;
@@ -1706,11 +1748,13 @@ var modelMaxImagesPerCall = {
  "dall-e-3": 1,
  "dall-e-2": 10,
  "gpt-image-1": 10,
-  "gpt-image-1-mini": 10
+  "gpt-image-1-mini": 10,
+  "gpt-image-1.5": 10
};
var hasDefaultResponseFormat = /* @__PURE__ */ new Set([
  "gpt-image-1",
-  "gpt-image-1-mini"
+  "gpt-image-1-mini",
+  "gpt-image-1.5"
]);

// src/image/openai-image-model.ts
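Note: `gpt-image-1.5` joins the models that default to base64 responses and cap at 10 images per call. Usage is unchanged; a sketch assuming the `experimental_generateImage` helper from the `ai` package:

  import { openai } from '@ai-sdk/openai';
  import { experimental_generateImage as generateImage } from 'ai';

  const { image } = await generateImage({
    model: openai.image('gpt-image-1.5'),
    prompt: 'A watercolor lighthouse at dusk'
  });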
@@ -1729,6 +1773,8 @@ var OpenAIImageModel = class {
  }
  async doGenerate({
    prompt,
+    files,
+    mask,
    n,
    size,
    aspectRatio,
@@ -1737,7 +1783,7 @@ var OpenAIImageModel = class {
    headers,
    abortSignal
  }) {
-    var _a, _b, _c, _d, _e, _f, _g;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
    const warnings = [];
    if (aspectRatio != null) {
      warnings.push({
@@ -1750,6 +1796,72 @@ var OpenAIImageModel = class {
      warnings.push({ type: "unsupported", feature: "seed" });
    }
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+    if (files != null) {
+      const { value: response2, responseHeaders: responseHeaders2 } = await (0, import_provider_utils13.postFormDataToApi)({
+        url: this.config.url({
+          path: "/images/edits",
+          modelId: this.modelId
+        }),
+        headers: (0, import_provider_utils13.combineHeaders)(this.config.headers(), headers),
+        formData: (0, import_provider_utils13.convertToFormData)({
+          model: this.modelId,
+          prompt,
+          image: await Promise.all(
+            files.map(
+              (file) => file.type === "file" ? new Blob(
+                [
+                  file.data instanceof Uint8Array ? new Blob([file.data], {
+                    type: file.mediaType
+                  }) : new Blob([(0, import_provider_utils13.convertBase64ToUint8Array)(file.data)], {
+                    type: file.mediaType
+                  })
+                ],
+                { type: file.mediaType }
+              ) : (0, import_provider_utils13.downloadBlob)(file.url)
+            )
+          ),
+          mask: mask != null ? await fileToBlob(mask) : void 0,
+          n,
+          size,
+          ...(_d = providerOptions.openai) != null ? _d : {}
+        }),
+        failedResponseHandler: openaiFailedResponseHandler,
+        successfulResponseHandler: (0, import_provider_utils13.createJsonResponseHandler)(
+          openaiImageResponseSchema
+        ),
+        abortSignal,
+        fetch: this.config.fetch
+      });
+      return {
+        images: response2.data.map((item) => item.b64_json),
+        warnings,
+        usage: response2.usage != null ? {
+          inputTokens: (_e = response2.usage.input_tokens) != null ? _e : void 0,
+          outputTokens: (_f = response2.usage.output_tokens) != null ? _f : void 0,
+          totalTokens: (_g = response2.usage.total_tokens) != null ? _g : void 0
+        } : void 0,
+        response: {
+          timestamp: currentDate,
+          modelId: this.modelId,
+          headers: responseHeaders2
+        },
+        providerMetadata: {
+          openai: {
+            images: response2.data.map((item) => {
+              var _a2, _b2, _c2, _d2, _e2;
+              return {
+                ...item.revised_prompt ? { revisedPrompt: item.revised_prompt } : {},
+                created: (_a2 = response2.created) != null ? _a2 : void 0,
+                size: (_b2 = response2.size) != null ? _b2 : void 0,
+                quality: (_c2 = response2.quality) != null ? _c2 : void 0,
+                background: (_d2 = response2.background) != null ? _d2 : void 0,
+                outputFormat: (_e2 = response2.output_format) != null ? _e2 : void 0
+              };
+            })
+          }
+        }
+      };
+    }
    const { value: response, responseHeaders } = await (0, import_provider_utils13.postJsonToApi)({
      url: this.config.url({
        path: "/images/generations",
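Note: when `files` is provided, the model now branches to POST multipart form data to /images/edits instead of JSON to /images/generations. The form fields, as assembled above (how the core image-generation helper surfaces `files` and `mask` is not shown in this diff):

  // multipart/form-data fields sent to POST /images/edits:
  // model  - this.modelId
  // prompt - the text prompt
  // image  - one Blob per input file (raw Uint8Array or base64 data, or downloaded from file.url)
  // mask   - optional Blob produced by fileToBlob()
  // n, size, plus any providerOptions.openai passthrough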
@@ -1761,7 +1873,7 @@ var OpenAIImageModel = class {
        prompt,
        n,
        size,
-        ...(
+        ...(_h = providerOptions.openai) != null ? _h : {},
        ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
      },
      failedResponseHandler: openaiFailedResponseHandler,
@@ -1775,9 +1887,9 @@ var OpenAIImageModel = class {
      images: response.data.map((item) => item.b64_json),
      warnings,
      usage: response.usage != null ? {
-        inputTokens: (
-        outputTokens: (
-        totalTokens: (
+        inputTokens: (_i = response.usage.input_tokens) != null ? _i : void 0,
+        outputTokens: (_j = response.usage.output_tokens) != null ? _j : void 0,
+        totalTokens: (_k = response.usage.total_tokens) != null ? _k : void 0
      } : void 0,
      response: {
        timestamp: currentDate,
@@ -1802,6 +1914,14 @@ var OpenAIImageModel = class {
    };
  }
};
+async function fileToBlob(file) {
+  if (!file) return void 0;
+  if (file.type === "url") {
+    return (0, import_provider_utils13.downloadBlob)(file.url);
+  }
+  const data = file.data instanceof Uint8Array ? file.data : (0, import_provider_utils13.convertBase64ToUint8Array)(file.data);
+  return new Blob([data], { type: file.mediaType });
+}

// src/tool/apply-patch.ts
var import_provider_utils14 = require("@ai-sdk/provider-utils");
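Note: `fileToBlob` mirrors the inline conversion used for the `image` entries. Its decision table, as a sketch:

  // { type: 'url', url }               -> downloadBlob(url)
  // { data: Uint8Array, mediaType }    -> new Blob([data], { type: mediaType })
  // { data: base64 string, mediaType } -> new Blob([convertBase64ToUint8Array(data)], { type: mediaType })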
@@ -2163,16 +2283,14 @@ var mcpArgsSchema = (0, import_provider_utils22.lazySchema)(
    authorization: import_v417.z.string().optional(),
    connectorId: import_v417.z.string().optional(),
    headers: import_v417.z.record(import_v417.z.string(), import_v417.z.string()).optional(),
-
-
-
-
-
-
-
-
-    // ])
-    // .optional(),
+    requireApproval: import_v417.z.union([
+      import_v417.z.enum(["always", "never"]),
+      import_v417.z.object({
+        never: import_v417.z.object({
+          toolNames: import_v417.z.array(import_v417.z.string()).optional()
+        }).optional()
+      })
+    ]).optional(),
    serverDescription: import_v417.z.string().optional(),
    serverUrl: import_v417.z.string().optional()
  }).refine(
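Note: the previously commented-out `requireApproval` argument is now live on the MCP tool schema. A hedged usage sketch, assuming the factory is exposed as `openai.tools.mcp` and accepts a server label alongside the fields shown above (server label, URL, and tool name are placeholders):

  const result = await generateText({
    model: openai.responses('gpt-5'),
    prompt: 'Look this up for me.',
    tools: {
      mcp: openai.tools.mcp({
        serverLabel: 'docs-server', // placeholder
        serverUrl: 'https://mcp.example.com', // placeholder
        requireApproval: { never: { toolNames: ['search'] } } // or 'always' | 'never'
      })
    }
  });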
@@ -2184,36 +2302,14 @@ var mcpArgsSchema = (0, import_provider_utils22.lazySchema)(
 var mcpInputSchema = (0, import_provider_utils22.lazySchema)(() => (0, import_provider_utils22.zodSchema)(import_v417.z.object({})));
 var mcpOutputSchema = (0, import_provider_utils22.lazySchema)(
   () => (0, import_provider_utils22.zodSchema)(
-    import_v417.z.
-    import_v417.z.
-
-
-
-
-
-
-      }),
-      import_v417.z.object({
-        type: import_v417.z.literal("listTools"),
-        serverLabel: import_v417.z.string(),
-        tools: import_v417.z.array(
-          import_v417.z.object({
-            name: import_v417.z.string(),
-            description: import_v417.z.string().optional(),
-            inputSchema: jsonValueSchema,
-            annotations: import_v417.z.record(import_v417.z.string(), jsonValueSchema).optional()
-          })
-        ),
-        error: import_v417.z.union([import_v417.z.string(), jsonValueSchema]).optional()
-      }),
-      import_v417.z.object({
-        type: import_v417.z.literal("approvalRequest"),
-        serverLabel: import_v417.z.string(),
-        name: import_v417.z.string(),
-        arguments: import_v417.z.string(),
-        approvalRequestId: import_v417.z.string()
-      })
-    ])
+    import_v417.z.object({
+      type: import_v417.z.literal("call"),
+      serverLabel: import_v417.z.string(),
+      name: import_v417.z.string(),
+      arguments: import_v417.z.string(),
+      output: import_v417.z.string().nullable().optional(),
+      error: import_v417.z.union([import_v417.z.string(), jsonValueSchema]).optional()
+    })
   )
 );
 var mcpToolFactory = (0, import_provider_utils22.createProviderToolFactoryWithOutputSchema)({
@@ -2605,6 +2701,9 @@ async function convertToOpenAIResponsesInput({
      }
      case "tool": {
        for (const part of content) {
+          if (part.type === "tool-approval-response") {
+            continue;
+          }
          const output = part.output;
          const resolvedToolName = toolNameMapping.toProviderToolName(
            part.toolName
@@ -2619,7 +2718,7 @@ async function convertToOpenAIResponsesInput({
              call_id: part.toolCallId,
              output: parsedOutput.output
            });
-
+            continue;
          }
          if (hasShellTool && resolvedToolName === "shell" && output.type === "json") {
            const parsedOutput = await (0, import_provider_utils23.validateTypes)({
@@ -2638,7 +2737,7 @@ async function convertToOpenAIResponsesInput({
              }
            }))
            });
-
+            continue;
          }
          if (hasApplyPatchTool && part.toolName === "apply_patch" && output.type === "json") {
            const parsedOutput = await (0, import_provider_utils23.validateTypes)({
@@ -2651,7 +2750,7 @@ async function convertToOpenAIResponsesInput({
              status: parsedOutput.status,
              output: parsedOutput.output
            });
-
+            continue;
          }
          let contentValue;
          switch (output.type) {
@@ -2732,7 +2831,7 @@ function mapOpenAIResponseFinishReason({
    case "content_filter":
      return "content-filter";
    default:
-      return hasFunctionCall ? "tool-calls" : "
+      return hasFunctionCall ? "tool-calls" : "other";
  }
}
 
@@ -3131,6 +3230,19 @@ var openaiResponsesChunkSchema = (0, import_provider_utils24.lazySchema)(
      item_id: import_v419.z.string(),
      summary_index: import_v419.z.number()
    }),
+    import_v419.z.object({
+      type: import_v419.z.literal("response.apply_patch_call_operation_diff.delta"),
+      item_id: import_v419.z.string(),
+      output_index: import_v419.z.number(),
+      delta: import_v419.z.string(),
+      obfuscation: import_v419.z.string().nullish()
+    }),
+    import_v419.z.object({
+      type: import_v419.z.literal("response.apply_patch_call_operation_diff.done"),
+      item_id: import_v419.z.string(),
+      output_index: import_v419.z.number(),
+      diff: import_v419.z.string()
+    }),
    import_v419.z.object({
      type: import_v419.z.literal("error"),
      sequence_number: import_v419.z.number(),
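Note: the two new stream events carry incremental and final diff text for an apply_patch call. Illustrative (hand-written, not captured) payloads matching the schemas above:

  { "type": "response.apply_patch_call_operation_diff.delta",
    "item_id": "apc_123", "output_index": 0, "delta": "@@ -1,2 +1,2 @@\n..." }
  { "type": "response.apply_patch_call_operation_diff.done",
    "item_id": "apc_123", "output_index": 0, "diff": "@@ -1,2 +1,2 @@\n..." }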
@@ -3611,7 +3723,26 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils25.lazySchem
       * Defaults to `undefined`.
       * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
       */
-      user: import_v420.z.string().nullish()
+      user: import_v420.z.string().nullish(),
+      /**
+       * Override the system message mode for this model.
+       * - 'system': Use the 'system' role for system messages (default for most models)
+       * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+       * - 'remove': Remove system messages entirely
+       *
+       * If not specified, the mode is automatically determined based on the model.
+       */
+      systemMessageMode: import_v420.z.enum(["system", "developer", "remove"]).optional(),
+      /**
+       * Force treating this model as a reasoning model.
+       *
+       * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+       * where the model ID is not recognized by the SDK's allowlist.
+       *
+       * When enabled, the SDK applies reasoning-model parameter compatibility rules
+       * and defaults `systemMessageMode` to `developer` unless overridden.
+       */
+      forceReasoning: import_v420.z.boolean().optional()
    })
  )
);
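Note: the Responses models accept the same two overrides as the chat models; see the chat sketch earlier. The short form:

  await generateText({
    model: openai.responses('stealth-reasoner'), // placeholder unrecognized model ID
    prompt: 'Hello!',
    providerOptions: { openai: { forceReasoning: true } }
  });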
@@ -3822,7 +3953,7 @@ var OpenAIResponsesLanguageModel = class {
    toolChoice,
    responseFormat
  }) {
-    var _a, _b, _c, _d;
+    var _a, _b, _c, _d, _e, _f;
    const warnings = [];
    const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
    if (topK != null) {
@@ -3845,6 +3976,7 @@ var OpenAIResponsesLanguageModel = class {
      providerOptions,
      schema: openaiResponsesProviderOptionsSchema
    });
+    const isReasoningModel = (_a = openaiOptions == null ? void 0 : openaiOptions.forceReasoning) != null ? _a : modelCapabilities.isReasoningModel;
    if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
      warnings.push({
        type: "unsupported",
@@ -3869,15 +4001,15 @@ var OpenAIResponsesLanguageModel = class {
    const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
      prompt,
      toolNameMapping,
-      systemMessageMode: modelCapabilities.systemMessageMode,
+      systemMessageMode: (_b = openaiOptions == null ? void 0 : openaiOptions.systemMessageMode) != null ? _b : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode,
      fileIdPrefixes: this.config.fileIdPrefixes,
-      store: (
+      store: (_c = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _c : true,
      hasLocalShellTool: hasOpenAITool("openai.local_shell"),
      hasShellTool: hasOpenAITool("openai.shell"),
      hasApplyPatchTool: hasOpenAITool("openai.apply_patch")
    });
    warnings.push(...inputWarnings);
-    const strictJsonSchema = (
+    const strictJsonSchema = (_d = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _d : true;
    let include = openaiOptions == null ? void 0 : openaiOptions.include;
    function addInclude(key) {
      if (include == null) {
@@ -3893,9 +4025,9 @@ var OpenAIResponsesLanguageModel = class {
    if (topLogprobs) {
      addInclude("message.output_text.logprobs");
    }
-    const webSearchToolName = (
+    const webSearchToolName = (_e = tools == null ? void 0 : tools.find(
      (tool) => tool.type === "provider" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
-    )) == null ? void 0 :
+    )) == null ? void 0 : _e.name;
    if (webSearchToolName) {
      addInclude("web_search_call.action.sources");
    }
@@ -3903,7 +4035,7 @@ var OpenAIResponsesLanguageModel = class {
      addInclude("code_interpreter_call.outputs");
    }
    const store = openaiOptions == null ? void 0 : openaiOptions.store;
-    if (store === false &&
+    if (store === false && isReasoningModel) {
      addInclude("reasoning.encrypted_content");
    }
    const baseArgs = {
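Note: with storage disabled, any model treated as reasoning-capable (including via `forceReasoning`) now gets `reasoning.encrypted_content` added to `include` automatically. Sketch:

  await generateText({
    model: openai.responses('gpt-5'),
    prompt: '...',
    providerOptions: {
      openai: { store: false } // SDK adds include: ['reasoning.encrypted_content']
    }
  });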
@@ -3918,7 +4050,7 @@ var OpenAIResponsesLanguageModel = class {
        format: responseFormat.schema != null ? {
          type: "json_schema",
          strict: strictJsonSchema,
-          name: (
+          name: (_f = responseFormat.name) != null ? _f : "response",
          description: responseFormat.description,
          schema: responseFormat.schema
        } : { type: "json_object" }
@@ -3945,7 +4077,7 @@ var OpenAIResponsesLanguageModel = class {
      top_logprobs: topLogprobs,
      truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
      // model-specific settings:
-      ...
+      ...isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
        reasoning: {
          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
            effort: openaiOptions.reasoningEffort
@@ -3956,7 +4088,7 @@ var OpenAIResponsesLanguageModel = class {
        }
      }
    };
-    if (
+    if (isReasoningModel) {
      if (!((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) === "none" && modelCapabilities.supportsNonReasoningParameters)) {
        if (baseArgs.temperature != null) {
          baseArgs.temperature = void 0;
@@ -4028,7 +4160,7 @@ var OpenAIResponsesLanguageModel = class {
    };
  }
  async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z;
    const {
      args: body,
      warnings,
@@ -4279,54 +4411,9 @@ var OpenAIResponsesLanguageModel = class {
          break;
        }
        case "mcp_list_tools": {
-          content.push({
-            type: "tool-call",
-            toolCallId: part.id,
-            toolName: toolNameMapping.toCustomToolName("mcp"),
-            input: JSON.stringify({}),
-            providerExecuted: true
-          });
-          content.push({
-            type: "tool-result",
-            toolCallId: part.id,
-            toolName: toolNameMapping.toCustomToolName("mcp"),
-            result: {
-              type: "listTools",
-              serverLabel: part.server_label,
-              tools: part.tools.map((t) => {
-                var _a2, _b2;
-                return {
-                  name: t.name,
-                  description: (_a2 = t.description) != null ? _a2 : void 0,
-                  inputSchema: t.input_schema,
-                  annotations: (_b2 = t.annotations) != null ? _b2 : void 0
-                };
-              }),
-              ...part.error != null ? { error: part.error } : {}
-            }
-          });
          break;
        }
        case "mcp_approval_request": {
-          content.push({
-            type: "tool-call",
-            toolCallId: part.id,
-            toolName: toolNameMapping.toCustomToolName("mcp"),
-            input: JSON.stringify({}),
-            providerExecuted: true
-          });
-          content.push({
-            type: "tool-result",
-            toolCallId: part.id,
-            toolName: toolNameMapping.toCustomToolName("mcp"),
-            result: {
-              type: "approvalRequest",
-              serverLabel: part.server_label,
-              name: part.name,
-              arguments: part.arguments,
-              approvalRequestId: part.approval_request_id
-            }
-          });
          break;
        }
        case "computer_call": {
@@ -4425,10 +4512,13 @@ var OpenAIResponsesLanguageModel = class {
    const usage = response.usage;
    return {
      content,
-      finishReason:
-
-
-
+      finishReason: {
+        unified: mapOpenAIResponseFinishReason({
+          finishReason: (_x = response.incomplete_details) == null ? void 0 : _x.reason,
+          hasFunctionCall
+        }),
+        raw: (_z = (_y = response.incomplete_details) == null ? void 0 : _y.reason) != null ? _z : void 0
+      },
      usage: convertOpenAIResponsesUsage(usage),
      request: { body },
      response: {
@@ -4469,7 +4559,10 @@ var OpenAIResponsesLanguageModel = class {
    });
    const self = this;
    const providerKey = this.config.provider.replace(".responses", "");
-    let finishReason =
+    let finishReason = {
+      unified: "other",
+      raw: void 0
+    };
    let usage = void 0;
    const logprobs = [];
    let responseId = null;
@@ -4485,12 +4578,12 @@ var OpenAIResponsesLanguageModel = class {
          controller.enqueue({ type: "stream-start", warnings });
        },
        transform(chunk, controller) {
-          var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A;
+          var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C;
          if (options.includeRawChunks) {
            controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
          }
          if (!chunk.success) {
-            finishReason = "error";
+            finishReason = { unified: "error", raw: void 0 };
            controller.enqueue({ type: "error", error: chunk.error });
            return;
          }
@@ -4589,24 +4682,40 @@ var OpenAIResponsesLanguageModel = class {
              providerExecuted: true
            });
          } else if (value.item.type === "apply_patch_call") {
+            const { call_id: callId, operation } = value.item;
            ongoingToolCalls[value.output_index] = {
              toolName: toolNameMapping.toCustomToolName("apply_patch"),
-              toolCallId:
+              toolCallId: callId,
+              applyPatch: {
+                // delete_file doesn't have diff
+                hasDiff: operation.type === "delete_file",
+                endEmitted: operation.type === "delete_file"
+              }
            };
-
+            controller.enqueue({
+              type: "tool-input-start",
+              id: callId,
+              toolName: toolNameMapping.toCustomToolName("apply_patch")
+            });
+            if (operation.type === "delete_file") {
+              const inputString = JSON.stringify({
+                callId,
+                operation
+              });
            controller.enqueue({
-              type: "tool-
-
-
-
-
-
-
-
-
-
-
+                type: "tool-input-delta",
+                id: callId,
+                delta: inputString
+              });
+              controller.enqueue({
+                type: "tool-input-end",
+                id: callId
+              });
+            } else {
+              controller.enqueue({
+                type: "tool-input-delta",
+                id: callId,
+                delta: `{"callId":"${escapeJSONDelta(callId)}","operation":{"type":"${escapeJSONDelta(operation.type)}","path":"${escapeJSONDelta(operation.path)}","diff":"`
            });
          }
        } else if (value.item.type === "shell_call") {
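Note: for non-delete operations the tool input is streamed as raw JSON text fragments, closed later by the operation-diff done handler. The emitted sequence for an update_file patch looks roughly like this (values illustrative):

  // tool-input-start  { id: callId, toolName: 'apply_patch' }
  // tool-input-delta  '{"callId":"call_1","operation":{"type":"update_file","path":"src/a.ts","diff":"'
  // tool-input-delta  '<escaped diff text from ...operation_diff.delta events>'
  // tool-input-delta  '"}}'   // closes the JSON once the diff is done
  // tool-input-end    { id: callId }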
@@ -4758,31 +4867,31 @@ var OpenAIResponsesLanguageModel = class {
            });
          } else if (value.item.type === "mcp_list_tools") {
            ongoingToolCalls[value.output_index] = void 0;
-            controller.enqueue({
-              type: "tool-result",
-              toolCallId: value.item.id,
-              toolName: toolNameMapping.toCustomToolName("mcp"),
-              result: {
-                type: "listTools",
-                serverLabel: value.item.server_label,
-                tools: value.item.tools.map((t) => {
-                  var _a2, _b2;
-                  return {
-                    name: t.name,
-                    description: (_a2 = t.description) != null ? _a2 : void 0,
-                    inputSchema: t.input_schema,
-                    annotations: (_b2 = t.annotations) != null ? _b2 : void 0
-                  };
-                }),
-                ...value.item.error != null ? { error: value.item.error } : {}
-              }
-            });
          } else if (value.item.type === "apply_patch_call") {
-            ongoingToolCalls[value.output_index]
-            if (value.item.
+            const toolCall = ongoingToolCalls[value.output_index];
+            if ((toolCall == null ? void 0 : toolCall.applyPatch) && !toolCall.applyPatch.endEmitted && value.item.operation.type !== "delete_file") {
+              if (!toolCall.applyPatch.hasDiff) {
+                controller.enqueue({
+                  type: "tool-input-delta",
+                  id: toolCall.toolCallId,
+                  delta: escapeJSONDelta(value.item.operation.diff)
+                });
+              }
+              controller.enqueue({
+                type: "tool-input-delta",
+                id: toolCall.toolCallId,
+                delta: '"}}'
+              });
+              controller.enqueue({
+                type: "tool-input-end",
+                id: toolCall.toolCallId
+              });
+              toolCall.applyPatch.endEmitted = true;
+            }
+            if (toolCall && value.item.status === "completed") {
              controller.enqueue({
                type: "tool-call",
-                toolCallId:
+                toolCallId: toolCall.toolCallId,
                toolName: toolNameMapping.toCustomToolName("apply_patch"),
                input: JSON.stringify({
                  callId: value.item.call_id,
@@ -4795,20 +4904,9 @@ var OpenAIResponsesLanguageModel = class {
              }
            });
          }
+          ongoingToolCalls[value.output_index] = void 0;
        } else if (value.item.type === "mcp_approval_request") {
          ongoingToolCalls[value.output_index] = void 0;
-          controller.enqueue({
-            type: "tool-result",
-            toolCallId: value.item.id,
-            toolName: toolNameMapping.toCustomToolName("mcp"),
-            result: {
-              type: "approvalRequest",
-              serverLabel: value.item.server_label,
-              name: value.item.name,
-              arguments: value.item.arguments,
-              approvalRequestId: value.item.approval_request_id
-            }
-          });
        } else if (value.item.type === "local_shell_call") {
          ongoingToolCalls[value.output_index] = void 0;
          controller.enqueue({
@@ -4874,6 +4972,38 @@ var OpenAIResponsesLanguageModel = class {
              delta: value.delta
            });
          }
+        } else if (isResponseApplyPatchCallOperationDiffDeltaChunk(value)) {
+          const toolCall = ongoingToolCalls[value.output_index];
+          if (toolCall == null ? void 0 : toolCall.applyPatch) {
+            controller.enqueue({
+              type: "tool-input-delta",
+              id: toolCall.toolCallId,
+              delta: escapeJSONDelta(value.delta)
+            });
+            toolCall.applyPatch.hasDiff = true;
+          }
+        } else if (isResponseApplyPatchCallOperationDiffDoneChunk(value)) {
+          const toolCall = ongoingToolCalls[value.output_index];
+          if ((toolCall == null ? void 0 : toolCall.applyPatch) && !toolCall.applyPatch.endEmitted) {
+            if (!toolCall.applyPatch.hasDiff) {
+              controller.enqueue({
+                type: "tool-input-delta",
+                id: toolCall.toolCallId,
+                delta: escapeJSONDelta(value.diff)
+              });
+              toolCall.applyPatch.hasDiff = true;
+            }
+            controller.enqueue({
+              type: "tool-input-delta",
+              id: toolCall.toolCallId,
+              delta: '"}}'
+            });
+            controller.enqueue({
+              type: "tool-input-end",
+              id: toolCall.toolCallId
+            });
+            toolCall.applyPatch.endEmitted = true;
+          }
        } else if (isResponseImageGenerationCallPartialImageChunk(value)) {
          controller.enqueue({
            type: "tool-result",
@@ -4890,9 +5020,7 @@ var OpenAIResponsesLanguageModel = class {
            controller.enqueue({
              type: "tool-input-delta",
              id: toolCall.toolCallId,
-
-              // To escape it, we use JSON.stringify and slice to remove the outer quotes.
-              delta: JSON.stringify(value.delta).slice(1, -1)
+              delta: escapeJSONDelta(value.delta)
            });
          }
        } else if (isResponseCodeInterpreterCallCodeDoneChunk(value)) {
@@ -4989,10 +5117,13 @@ var OpenAIResponsesLanguageModel = class {
            activeReasoning[value.item_id].summaryParts[value.summary_index] = "can-conclude";
          }
        } else if (isResponseFinishedChunk(value)) {
-          finishReason =
-
-
-
+          finishReason = {
+            unified: mapOpenAIResponseFinishReason({
+              finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
+              hasFunctionCall
+            }),
+            raw: (_k = (_j = value.response.incomplete_details) == null ? void 0 : _j.reason) != null ? _k : void 0
+          };
          usage = value.response.usage;
          if (typeof value.response.service_tier === "string") {
            serviceTier = value.response.service_tier;
@@ -5003,7 +5134,7 @@ var OpenAIResponsesLanguageModel = class {
          controller.enqueue({
            type: "source",
            sourceType: "url",
-            id: (
+            id: (_n = (_m = (_l = self.config).generateId) == null ? void 0 : _m.call(_l)) != null ? _n : (0, import_provider_utils27.generateId)(),
            url: value.annotation.url,
            title: value.annotation.title
          });
@@ -5011,10 +5142,10 @@ var OpenAIResponsesLanguageModel = class {
          controller.enqueue({
            type: "source",
            sourceType: "document",
-            id: (
+            id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : (0, import_provider_utils27.generateId)(),
            mediaType: "text/plain",
-            title: (
-            filename: (
+            title: (_s = (_r = value.annotation.quote) != null ? _r : value.annotation.filename) != null ? _s : "Document",
+            filename: (_t = value.annotation.filename) != null ? _t : value.annotation.file_id,
            ...value.annotation.file_id ? {
              providerMetadata: {
                [providerKey]: {
@@ -5027,10 +5158,10 @@ var OpenAIResponsesLanguageModel = class {
          controller.enqueue({
            type: "source",
            sourceType: "document",
-            id: (
+            id: (_w = (_v = (_u = self.config).generateId) == null ? void 0 : _v.call(_u)) != null ? _w : (0, import_provider_utils27.generateId)(),
            mediaType: "text/plain",
-            title: (
-            filename: (
+            title: (_y = (_x = value.annotation.filename) != null ? _x : value.annotation.file_id) != null ? _y : "Document",
+            filename: (_z = value.annotation.filename) != null ? _z : value.annotation.file_id,
            providerMetadata: {
              [providerKey]: {
                fileId: value.annotation.file_id,
@@ -5043,7 +5174,7 @@ var OpenAIResponsesLanguageModel = class {
          controller.enqueue({
            type: "source",
            sourceType: "document",
-            id: (
+            id: (_C = (_B = (_A = self.config).generateId) == null ? void 0 : _B.call(_A)) != null ? _C : (0, import_provider_utils27.generateId)(),
            mediaType: "application/octet-stream",
            title: value.annotation.file_id,
            filename: value.annotation.file_id,
@@ -5109,6 +5240,12 @@ function isResponseCodeInterpreterCallCodeDeltaChunk(chunk) {
function isResponseCodeInterpreterCallCodeDoneChunk(chunk) {
  return chunk.type === "response.code_interpreter_call_code.done";
}
+function isResponseApplyPatchCallOperationDiffDeltaChunk(chunk) {
+  return chunk.type === "response.apply_patch_call_operation_diff.delta";
+}
+function isResponseApplyPatchCallOperationDiffDoneChunk(chunk) {
+  return chunk.type === "response.apply_patch_call_operation_diff.done";
+}
function isResponseOutputItemAddedChunk(chunk) {
  return chunk.type === "response.output_item.added";
}
@@ -5139,6 +5276,9 @@ function mapWebSearchOutput(action) {
    };
  }
}
+function escapeJSONDelta(delta) {
+  return JSON.stringify(delta).slice(1, -1);
+}

// src/speech/openai-speech-model.ts
var import_provider_utils29 = require("@ai-sdk/provider-utils");
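Note: `escapeJSONDelta` reuses `JSON.stringify` for escaping and strips the surrounding quotes, so fragments can be concatenated inside a hand-built JSON string literal:

  escapeJSONDelta('line1\nline2 "x"'); // => 'line1\\nline2 \\"x\\"'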
@@ -5493,7 +5633,7 @@ var OpenAITranscriptionModel = class {
};

// src/version.ts
-var VERSION = true ? "3.0.0
+var VERSION = true ? "3.0.0" : "0.0.0-test";

// src/openai-provider.ts
function createOpenAI(options = {}) {
@@ -5581,6 +5721,8 @@ function createOpenAI(options = {}) {
  provider.responses = createResponsesModel;
  provider.embedding = createEmbeddingModel;
  provider.embeddingModel = createEmbeddingModel;
+  provider.textEmbedding = createEmbeddingModel;
+  provider.textEmbeddingModel = createEmbeddingModel;
  provider.image = createImageModel;
  provider.imageModel = createImageModel;
  provider.transcription = createTranscriptionModel;
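Note: `textEmbedding` and `textEmbeddingModel` are new aliases for the existing embedding factory:

  import { openai } from '@ai-sdk/openai';
  import { embed } from 'ai';

  const { embedding } = await embed({
    model: openai.textEmbeddingModel('text-embedding-3-small'),
    value: 'sunny day at the beach'
  });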