@ai-sdk/openai 3.0.0 → 3.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +51 -303
- package/dist/index.js +157 -46
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +157 -46
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +156 -45
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +156 -45
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +2 -2
package/dist/index.js
CHANGED
@@ -2307,7 +2307,7 @@ var mcpOutputSchema = (0, import_provider_utils22.lazySchema)(
     serverLabel: import_v417.z.string(),
     name: import_v417.z.string(),
     arguments: import_v417.z.string(),
-    output: import_v417.z.string().
+    output: import_v417.z.string().nullish(),
     error: import_v417.z.union([import_v417.z.string(), jsonValueSchema]).optional()
   })
 )
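The first functional change loosens the MCP tool-call output schema: `output` is now `nullish()`, so an explicit `null` from the API no longer fails validation (the pre-3.0.1 modifier is truncated in this diff view). A minimal zod sketch of the difference, assuming the usual `z.string()` base:

```ts
import { z } from "zod";

// `.optional()` only tolerates a missing/undefined field; `.nullish()` also tolerates null.
const optionalOutput = z.object({ output: z.string().optional() });
const nullishOutput = z.object({ output: z.string().nullish() });

console.log(optionalOutput.safeParse({ output: null }).success); // false
console.log(nullishOutput.safeParse({ output: null }).success); // true
```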
@@ -2478,9 +2478,10 @@ async function convertToOpenAIResponsesInput({
   hasShellTool = false,
   hasApplyPatchTool = false
 }) {
-  var _a, _b, _c, _d, _e;
+  var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
   const input = [];
   const warnings = [];
+  const processedApprovalIds = /* @__PURE__ */ new Set();
   for (const { role, content } of prompt) {
     switch (role) {
       case "system": {
@@ -2571,10 +2572,13 @@ async function convertToOpenAIResponsesInput({
           break;
         }
         case "tool-call": {
+          const id = (_g = (_d = (_c = part.providerOptions) == null ? void 0 : _c.openai) == null ? void 0 : _d.itemId) != null ? _g : (_f = (_e = part.providerMetadata) == null ? void 0 : _e.openai) == null ? void 0 : _f.itemId;
           if (part.providerExecuted) {
+            if (store && id != null) {
+              input.push({ type: "item_reference", id });
+            }
             break;
           }
-          const id = (_d = (_c = part.providerOptions) == null ? void 0 : _c.openai) == null ? void 0 : _d.itemId;
           if (store && id != null) {
             input.push({ type: "item_reference", id });
             break;
@@ -2631,8 +2635,12 @@ async function convertToOpenAIResponsesInput({
         }
         // assistant tool result parts are from provider-executed tools:
         case "tool-result": {
+          if (part.output.type === "execution-denied" || part.output.type === "json" && typeof part.output.value === "object" && part.output.value != null && "type" in part.output.value && part.output.value.type === "execution-denied") {
+            break;
+          }
           if (store) {
-
+            const itemId = (_j = (_i = (_h = part.providerMetadata) == null ? void 0 : _h.openai) == null ? void 0 : _i.itemId) != null ? _j : part.toolCallId;
+            input.push({ type: "item_reference", id: itemId });
           } else {
             warnings.push({
               type: "other",
@@ -2702,9 +2710,31 @@ async function convertToOpenAIResponsesInput({
       case "tool": {
         for (const part of content) {
           if (part.type === "tool-approval-response") {
+            const approvalResponse = part;
+            if (processedApprovalIds.has(approvalResponse.approvalId)) {
+              continue;
+            }
+            processedApprovalIds.add(approvalResponse.approvalId);
+            if (store) {
+              input.push({
+                type: "item_reference",
+                id: approvalResponse.approvalId
+              });
+            }
+            input.push({
+              type: "mcp_approval_response",
+              approval_request_id: approvalResponse.approvalId,
+              approve: approvalResponse.approved
+            });
             continue;
           }
           const output = part.output;
+          if (output.type === "execution-denied") {
+            const approvalId = (_l = (_k = output.providerOptions) == null ? void 0 : _k.openai) == null ? void 0 : _l.approvalId;
+            if (approvalId) {
+              continue;
+            }
+          }
           const resolvedToolName = toolNameMapping.toProviderToolName(
             part.toolName
           );
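To make the new `tool-approval-response` handling above easier to follow, here is a readable sketch of the same logic (de-minified; the part shape and `store` flag are taken from the diff, the TypeScript type names are assumptions):

```ts
type ToolApprovalResponse = { approvalId: string; approved: boolean };

function convertApprovalResponse(
  part: ToolApprovalResponse,
  store: boolean,
  processedApprovalIds: Set<string>,
  input: Array<Record<string, unknown>>,
): void {
  // de-duplicate repeated responses for the same approval request
  if (processedApprovalIds.has(part.approvalId)) return;
  processedApprovalIds.add(part.approvalId);
  if (store) {
    // reference the mcp_approval_request item the server already stored
    input.push({ type: "item_reference", id: part.approvalId });
  }
  input.push({
    type: "mcp_approval_response",
    approval_request_id: part.approvalId,
    approve: part.approved,
  });
}
```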
@@ -2759,7 +2789,7 @@ async function convertToOpenAIResponsesInput({
               contentValue = output.value;
               break;
             case "execution-denied":
-              contentValue = (
+              contentValue = (_m = output.reason) != null ? _m : "Tool execution denied.";
               break;
             case "json":
             case "error-json":
@@ -2934,7 +2964,8 @@ var openaiResponsesChunkSchema = (0, import_provider_utils24.lazySchema)(
       import_v419.z.object({
         type: import_v419.z.literal("mcp_call"),
         id: import_v419.z.string(),
-        status: import_v419.z.string()
+        status: import_v419.z.string(),
+        approval_request_id: import_v419.z.string().nullish()
       }),
       import_v419.z.object({
         type: import_v419.z.literal("mcp_list_tools"),
@@ -3091,7 +3122,8 @@ var openaiResponsesChunkSchema = (0, import_provider_utils24.lazySchema)(
           code: import_v419.z.union([import_v419.z.number(), import_v419.z.string()]).optional(),
           message: import_v419.z.string().optional()
         }).loose()
-      ]).nullish()
+      ]).nullish(),
+      approval_request_id: import_v419.z.string().nullish()
     }),
     import_v419.z.object({
       type: import_v419.z.literal("mcp_list_tools"),
@@ -3120,7 +3152,7 @@ var openaiResponsesChunkSchema = (0, import_provider_utils24.lazySchema)(
       server_label: import_v419.z.string(),
       name: import_v419.z.string(),
       arguments: import_v419.z.string(),
-      approval_request_id: import_v419.z.string()
+      approval_request_id: import_v419.z.string().optional()
     }),
     import_v419.z.object({
       type: import_v419.z.literal("apply_patch_call"),
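Taken together, the chunk-schema changes above make the parser accept an `approval_request_id` on `mcp_call` items and tolerate its absence on the approval-request-shaped items (the object with `server_label`/`name`/`arguments` appears to be the `mcp_approval_request` item, judging by the handling later in this diff). An illustrative, hypothetical pair of payloads that both validate under the new schemas:

```ts
// Hypothetical output items; field values are placeholders, not real API output.
const mcpCallItem = {
  type: "mcp_call",
  id: "mcp_1",
  status: "completed",
  approval_request_id: "mcpr_123", // may also be null or omitted
};

const mcpApprovalRequestItem = {
  type: "mcp_approval_request",
  id: "mcpr_123",
  server_label: "docs",
  name: "search",
  arguments: "{}",
  // approval_request_id omitted: now allowed, since the field is .optional()
};

console.log(mcpCallItem.type, mcpApprovalRequestItem.type);
```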
@@ -3443,7 +3475,8 @@ var openaiResponsesResponseSchema = (0, import_provider_utils24.lazySchema)(
           code: import_v419.z.union([import_v419.z.number(), import_v419.z.string()]).optional(),
           message: import_v419.z.string().optional()
         }).loose()
-      ]).nullish()
+      ]).nullish(),
+      approval_request_id: import_v419.z.string().nullish()
     }),
     import_v419.z.object({
       type: import_v419.z.literal("mcp_list_tools"),
@@ -3472,7 +3505,7 @@ var openaiResponsesResponseSchema = (0, import_provider_utils24.lazySchema)(
       server_label: import_v419.z.string(),
       name: import_v419.z.string(),
       arguments: import_v419.z.string(),
-      approval_request_id: import_v419.z.string()
+      approval_request_id: import_v419.z.string().optional()
     }),
     import_v419.z.object({
       type: import_v419.z.literal("apply_patch_call"),
@@ -3873,6 +3906,11 @@ async function prepareResponsesTools({
         value: tool.args,
         schema: mcpArgsSchema
       });
+      const mapApprovalFilter = (filter) => ({
+        tool_names: filter.toolNames
+      });
+      const requireApproval = args.requireApproval;
+      const requireApprovalParam = requireApproval == null ? void 0 : typeof requireApproval === "string" ? requireApproval : requireApproval.never != null ? { never: mapApprovalFilter(requireApproval.never) } : void 0;
       openaiTools2.push({
         type: "mcp",
         server_label: args.serverLabel,
@@ -3883,7 +3921,7 @@ async function prepareResponsesTools({
         authorization: args.authorization,
         connector_id: args.connectorId,
         headers: args.headers,
-        require_approval: "never",
+        require_approval: requireApprovalParam != null ? requireApprovalParam : "never",
         server_description: args.serverDescription,
         server_url: args.serverUrl
       });
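The two `prepareResponsesTools` hunks above replace the hard-coded `require_approval: "never"` with a value derived from the MCP tool's `requireApproval` argument. A readable sketch of that mapping (the option shape is inferred from the minified code; treat the type names as assumptions):

```ts
type ApprovalFilter = { toolNames: string[] };
type RequireApproval =
  | string // passed through as-is, e.g. "always" or "never"
  | { never?: ApprovalFilter };

function toRequireApprovalParam(requireApproval?: RequireApproval) {
  if (requireApproval == null) return undefined; // the call site then falls back to "never"
  if (typeof requireApproval === "string") return requireApproval;
  return requireApproval.never != null
    ? { never: { tool_names: requireApproval.never.toolNames } }
    : undefined;
}

console.log(toRequireApprovalParam({ never: { toolNames: ["search"] } }));
// { never: { tool_names: ["search"] } }
```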
@@ -3925,6 +3963,21 @@ async function prepareResponsesTools({
 }

 // src/responses/openai-responses-language-model.ts
+function extractApprovalRequestIdToToolCallIdMapping(prompt) {
+  var _a, _b;
+  const mapping = {};
+  for (const message of prompt) {
+    if (message.role !== "assistant") continue;
+    for (const part of message.content) {
+      if (part.type !== "tool-call") continue;
+      const approvalRequestId = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.approvalRequestId;
+      if (approvalRequestId != null) {
+        mapping[approvalRequestId] = part.toolCallId;
+      }
+    }
+  }
+  return mapping;
+}
 var OpenAIResponsesLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v3";
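The new helper above, de-minified for readability (a sketch with loose types; the behavior mirrors the code in the diff): it scans the prompt for assistant `tool-call` parts whose `providerOptions.openai.approvalRequestId` is set and records which local tool-call id was used for each approval request.

```ts
type PromptMessage = { role: string; content: Array<any> };

function extractApprovalRequestIdToToolCallIdMapping(
  prompt: PromptMessage[],
): Record<string, string> {
  const mapping: Record<string, string> = {};
  for (const message of prompt) {
    if (message.role !== "assistant") continue;
    for (const part of message.content) {
      if (part.type !== "tool-call") continue;
      const approvalRequestId = part.providerOptions?.openai?.approvalRequestId;
      if (approvalRequestId != null) {
        mapping[approvalRequestId] = part.toolCallId;
      }
    }
  }
  return mapping;
}
```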
@@ -4160,7 +4213,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C, _D, _E;
     const {
       args: body,
       warnings,
@@ -4172,6 +4225,7 @@ var OpenAIResponsesLanguageModel = class {
       modelId: this.modelId
     });
     const providerKey = this.config.provider.replace(".responses", "");
+    const approvalRequestIdToDummyToolCallIdFromPrompt = extractApprovalRequestIdToToolCallIdMapping(options.prompt);
     const {
       responseHeaders,
       value: response,
@@ -4388,17 +4442,20 @@ var OpenAIResponsesLanguageModel = class {
           break;
         }
         case "mcp_call": {
+          const toolCallId = part.approval_request_id != null ? (_v = approvalRequestIdToDummyToolCallIdFromPrompt[part.approval_request_id]) != null ? _v : part.id : part.id;
+          const toolName = `mcp.${part.name}`;
           content.push({
             type: "tool-call",
-            toolCallId
-            toolName
-            input:
-            providerExecuted: true
+            toolCallId,
+            toolName,
+            input: part.arguments,
+            providerExecuted: true,
+            dynamic: true
           });
           content.push({
             type: "tool-result",
-            toolCallId
-            toolName
+            toolCallId,
+            toolName,
             result: {
               type: "call",
               serverLabel: part.server_label,
@@ -4406,6 +4463,11 @@ var OpenAIResponsesLanguageModel = class {
               arguments: part.arguments,
               ...part.output != null ? { output: part.output } : {},
               ...part.error != null ? { error: part.error } : {}
+            },
+            providerMetadata: {
+              [providerKey]: {
+                itemId: part.id
+              }
             }
           });
           break;
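Recording the MCP call's `itemId` in `providerMetadata` on the tool result pairs with the converter change near the top of this diff: on the next request, when `store` is enabled, the stored item can be referenced instead of re-sending the result. A small sketch of that consumption path (the type is an assumption):

```ts
type McpToolResultPart = {
  toolCallId: string;
  providerMetadata?: { openai?: { itemId?: string } };
};

// Mirrors the converter: prefer the recorded item id, fall back to the tool call id.
function toItemReference(part: McpToolResultPart) {
  const itemId = part.providerMetadata?.openai?.itemId ?? part.toolCallId;
  return { type: "item_reference", id: itemId };
}

console.log(toItemReference({ toolCallId: "call_1" })); // { type: "item_reference", id: "call_1" }
```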
@@ -4414,6 +4476,22 @@ var OpenAIResponsesLanguageModel = class {
           break;
         }
         case "mcp_approval_request": {
+          const approvalRequestId = (_w = part.approval_request_id) != null ? _w : part.id;
+          const dummyToolCallId = (_z = (_y = (_x = this.config).generateId) == null ? void 0 : _y.call(_x)) != null ? _z : (0, import_provider_utils27.generateId)();
+          const toolName = `mcp.${part.name}`;
+          content.push({
+            type: "tool-call",
+            toolCallId: dummyToolCallId,
+            toolName,
+            input: part.arguments,
+            providerExecuted: true,
+            dynamic: true
+          });
+          content.push({
+            type: "tool-approval-request",
+            approvalId: approvalRequestId,
+            toolCallId: dummyToolCallId
+          });
           break;
         }
         case "computer_call": {
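In non-streaming generation, an `mcp_approval_request` item is now surfaced as a provider-executed dynamic tool call plus a `tool-approval-request` part that share a locally generated id. A sketch of the resulting content parts (the values are placeholders; the shapes follow the code above):

```ts
const dummyToolCallId = "generated-locally"; // this.config.generateId?.() ?? generateId()

const parts = [
  {
    type: "tool-call",
    toolCallId: dummyToolCallId,
    toolName: "mcp.search",       // `mcp.${part.name}`
    input: '{"query":"example"}', // part.arguments (a JSON string)
    providerExecuted: true,
    dynamic: true,
  },
  {
    type: "tool-approval-request",
    approvalId: "mcpr_123",       // part.approval_request_id ?? part.id
    toolCallId: dummyToolCallId,
  },
];

console.log(parts.length); // 2
```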
@@ -4449,13 +4527,13 @@ var OpenAIResponsesLanguageModel = class {
             toolName: toolNameMapping.toCustomToolName("file_search"),
             result: {
               queries: part.queries,
-              results: (
+              results: (_B = (_A = part.results) == null ? void 0 : _A.map((result) => ({
                 attributes: result.attributes,
                 fileId: result.file_id,
                 filename: result.filename,
                 score: result.score,
                 text: result.text
-              }))) != null ?
+              }))) != null ? _B : null
             }
           });
           break;
@@ -4514,10 +4592,10 @@ var OpenAIResponsesLanguageModel = class {
       content,
       finishReason: {
         unified: mapOpenAIResponseFinishReason({
-          finishReason: (
+          finishReason: (_C = response.incomplete_details) == null ? void 0 : _C.reason,
           hasFunctionCall
         }),
-        raw: (
+        raw: (_E = (_D = response.incomplete_details) == null ? void 0 : _D.reason) != null ? _E : void 0
       },
       usage: convertOpenAIResponsesUsage(usage),
       request: { body },
@@ -4559,6 +4637,8 @@ var OpenAIResponsesLanguageModel = class {
     });
     const self = this;
     const providerKey = this.config.provider.replace(".responses", "");
+    const approvalRequestIdToDummyToolCallIdFromPrompt = extractApprovalRequestIdToToolCallIdMapping(options.prompt);
+    const approvalRequestIdToDummyToolCallIdFromStream = /* @__PURE__ */ new Map();
     let finishReason = {
       unified: "other",
       raw: void 0
@@ -4578,7 +4658,7 @@ var OpenAIResponsesLanguageModel = class {
         controller.enqueue({ type: "stream-start", warnings });
       },
       transform(chunk, controller) {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C;
+        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C, _D, _E, _F, _G, _H, _I, _J;
         if (options.includeRawChunks) {
           controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
         }
@@ -4674,13 +4754,6 @@ var OpenAIResponsesLanguageModel = class {
               providerExecuted: true
             });
           } else if (value.item.type === "mcp_call" || value.item.type === "mcp_list_tools" || value.item.type === "mcp_approval_request") {
-            controller.enqueue({
-              type: "tool-call",
-              toolCallId: value.item.id,
-              toolName: toolNameMapping.toCustomToolName("mcp"),
-              input: "{}",
-              providerExecuted: true
-            });
           } else if (value.item.type === "apply_patch_call") {
             const { call_id: callId, operation } = value.item;
             ongoingToolCalls[value.output_index] = {
@@ -4852,10 +4925,23 @@ var OpenAIResponsesLanguageModel = class {
             });
           } else if (value.item.type === "mcp_call") {
             ongoingToolCalls[value.output_index] = void 0;
+            const approvalRequestId = (_d = value.item.approval_request_id) != null ? _d : void 0;
+            const aliasedToolCallId = approvalRequestId != null ? (_f = (_e = approvalRequestIdToDummyToolCallIdFromStream.get(
+              approvalRequestId
+            )) != null ? _e : approvalRequestIdToDummyToolCallIdFromPrompt[approvalRequestId]) != null ? _f : value.item.id : value.item.id;
+            const toolName = `mcp.${value.item.name}`;
+            controller.enqueue({
+              type: "tool-call",
+              toolCallId: aliasedToolCallId,
+              toolName,
+              input: value.item.arguments,
+              providerExecuted: true,
+              dynamic: true
+            });
             controller.enqueue({
               type: "tool-result",
-              toolCallId:
-              toolName
+              toolCallId: aliasedToolCallId,
+              toolName,
               result: {
                 type: "call",
                 serverLabel: value.item.server_label,
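The aliasing logic above decides which tool-call id a finished `mcp_call` should be attributed to. A readable equivalent (a sketch; the map and record correspond to `approvalRequestIdToDummyToolCallIdFromStream` and `...FromPrompt` in the diff):

```ts
// A finished mcp_call that carries an approval_request_id is attributed to the
// dummy tool call created for that approval request (first from the current
// stream, then from the prompt), otherwise to its own item id.
function resolveToolCallId(
  item: { id: string; approval_request_id?: string | null },
  fromStream: Map<string, string>,
  fromPrompt: Record<string, string>,
): string {
  const approvalRequestId = item.approval_request_id ?? undefined;
  if (approvalRequestId == null) return item.id;
  return fromStream.get(approvalRequestId) ?? fromPrompt[approvalRequestId] ?? item.id;
}
```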
@@ -4863,6 +4949,11 @@ var OpenAIResponsesLanguageModel = class {
                 arguments: value.item.arguments,
                 ...value.item.output != null ? { output: value.item.output } : {},
                 ...value.item.error != null ? { error: value.item.error } : {}
+              },
+              providerMetadata: {
+                [providerKey]: {
+                  itemId: value.item.id
+                }
               }
             });
           } else if (value.item.type === "mcp_list_tools") {
@@ -4907,6 +4998,26 @@ var OpenAIResponsesLanguageModel = class {
             ongoingToolCalls[value.output_index] = void 0;
           } else if (value.item.type === "mcp_approval_request") {
             ongoingToolCalls[value.output_index] = void 0;
+            const dummyToolCallId = (_i = (_h = (_g = self.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0, import_provider_utils27.generateId)();
+            const approvalRequestId = (_j = value.item.approval_request_id) != null ? _j : value.item.id;
+            approvalRequestIdToDummyToolCallIdFromStream.set(
+              approvalRequestId,
+              dummyToolCallId
+            );
+            const toolName = `mcp.${value.item.name}`;
+            controller.enqueue({
+              type: "tool-call",
+              toolCallId: dummyToolCallId,
+              toolName,
+              input: value.item.arguments,
+              providerExecuted: true,
+              dynamic: true
+            });
+            controller.enqueue({
+              type: "tool-approval-request",
+              approvalId: approvalRequestId,
+              toolCallId: dummyToolCallId
+            });
           } else if (value.item.type === "local_shell_call") {
             ongoingToolCalls[value.output_index] = void 0;
             controller.enqueue({
@@ -4956,7 +5067,7 @@ var OpenAIResponsesLanguageModel = class {
               providerMetadata: {
                 [providerKey]: {
                   itemId: value.item.id,
-                  reasoningEncryptedContent: (
+                  reasoningEncryptedContent: (_k = value.item.encrypted_content) != null ? _k : null
                 }
               }
             });
@@ -5060,7 +5171,7 @@ var OpenAIResponsesLanguageModel = class {
             id: value.item_id,
             delta: value.delta
           });
-          if (((
+          if (((_m = (_l = options.providerOptions) == null ? void 0 : _l.openai) == null ? void 0 : _m.logprobs) && value.logprobs) {
            logprobs.push(value.logprobs);
          }
        } else if (value.type === "response.reasoning_summary_part.added") {
@@ -5087,7 +5198,7 @@ var OpenAIResponsesLanguageModel = class {
             providerMetadata: {
               [providerKey]: {
                 itemId: value.item_id,
-                reasoningEncryptedContent: (
+                reasoningEncryptedContent: (_o = (_n = activeReasoning[value.item_id]) == null ? void 0 : _n.encryptedContent) != null ? _o : null
               }
             }
           });
@@ -5119,10 +5230,10 @@ var OpenAIResponsesLanguageModel = class {
         } else if (isResponseFinishedChunk(value)) {
           finishReason = {
             unified: mapOpenAIResponseFinishReason({
-              finishReason: (
+              finishReason: (_p = value.response.incomplete_details) == null ? void 0 : _p.reason,
               hasFunctionCall
             }),
-            raw: (
+            raw: (_r = (_q = value.response.incomplete_details) == null ? void 0 : _q.reason) != null ? _r : void 0
           };
           usage = value.response.usage;
           if (typeof value.response.service_tier === "string") {
@@ -5134,7 +5245,7 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({
             type: "source",
             sourceType: "url",
-            id: (
+            id: (_u = (_t = (_s = self.config).generateId) == null ? void 0 : _t.call(_s)) != null ? _u : (0, import_provider_utils27.generateId)(),
             url: value.annotation.url,
             title: value.annotation.title
           });
@@ -5142,10 +5253,10 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({
             type: "source",
             sourceType: "document",
-            id: (
+            id: (_x = (_w = (_v = self.config).generateId) == null ? void 0 : _w.call(_v)) != null ? _x : (0, import_provider_utils27.generateId)(),
             mediaType: "text/plain",
-            title: (
-            filename: (
+            title: (_z = (_y = value.annotation.quote) != null ? _y : value.annotation.filename) != null ? _z : "Document",
+            filename: (_A = value.annotation.filename) != null ? _A : value.annotation.file_id,
             ...value.annotation.file_id ? {
               providerMetadata: {
                 [providerKey]: {
@@ -5158,10 +5269,10 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({
             type: "source",
             sourceType: "document",
-            id: (
+            id: (_D = (_C = (_B = self.config).generateId) == null ? void 0 : _C.call(_B)) != null ? _D : (0, import_provider_utils27.generateId)(),
             mediaType: "text/plain",
-            title: (
-            filename: (
+            title: (_F = (_E = value.annotation.filename) != null ? _E : value.annotation.file_id) != null ? _F : "Document",
+            filename: (_G = value.annotation.filename) != null ? _G : value.annotation.file_id,
             providerMetadata: {
               [providerKey]: {
                 fileId: value.annotation.file_id,
@@ -5174,7 +5285,7 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({
             type: "source",
             sourceType: "document",
-            id: (
+            id: (_J = (_I = (_H = self.config).generateId) == null ? void 0 : _I.call(_H)) != null ? _J : (0, import_provider_utils27.generateId)(),
             mediaType: "application/octet-stream",
             title: value.annotation.file_id,
             filename: value.annotation.file_id,
@@ -5633,7 +5744,7 @@ var OpenAITranscriptionModel = class {
 };

 // src/version.ts
-var VERSION = true ? "3.0.0" : "0.0.0-test";
+var VERSION = true ? "3.0.1" : "0.0.0-test";

 // src/openai-provider.ts
 function createOpenAI(options = {}) {