@zenning/ai 6.0.22 → 6.0.24
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.js +54 -26
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +54 -26
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +1 -1
- package/dist/internal/index.mjs +1 -1
- package/package.json +3 -3
package/CHANGELOG.md
CHANGED
package/dist/index.js
CHANGED
@@ -1001,7 +1001,7 @@ var import_provider_utils3 = require("@zenning/provider-utils");
 var import_provider_utils4 = require("@zenning/provider-utils");
 
 // src/version.ts
-var VERSION = true ? "6.0.22" : "0.0.0-test";
+var VERSION = true ? "6.0.24" : "0.0.0-test";
 
 // src/util/download/download.ts
 var download = async ({ url }) => {
@@ -2382,8 +2382,10 @@ function prepareRetries({
 
 // src/generate-text/collect-tool-approvals.ts
 function collectToolApprovals({
-  messages
+  messages,
+  allowMissingApprovalContext = false
 }) {
+  var _a16;
   const lastMessage = messages.at(-1);
   if ((lastMessage == null ? void 0 : lastMessage.role) != "tool") {
     return {
@@ -2425,21 +2427,40 @@ function collectToolApprovals({
     (part) => part.type === "tool-approval-response"
   );
   for (const approvalResponse of approvalResponses) {
-    const approvalRequest = toolApprovalRequestsByApprovalId[approvalResponse.approvalId];
+    let approvalRequest = toolApprovalRequestsByApprovalId[approvalResponse.approvalId];
     if (approvalRequest == null) {
-      throw new InvalidToolApprovalError({
-        approvalId: approvalResponse.approvalId
-      });
+      if (allowMissingApprovalContext) {
+        const toolCallId = (_a16 = approvalResponse.toolCallId) != null ? _a16 : approvalResponse.approvalId;
+        approvalRequest = {
+          type: "tool-approval-request",
+          approvalId: approvalResponse.approvalId,
+          toolCallId
+        };
+      } else {
+        throw new InvalidToolApprovalError({
+          approvalId: approvalResponse.approvalId
+        });
+      }
     }
     if (toolResults[approvalRequest.toolCallId] != null) {
       continue;
     }
-    const toolCall = toolCallsByToolCallId[approvalRequest.toolCallId];
+    let toolCall = toolCallsByToolCallId[approvalRequest.toolCallId];
     if (toolCall == null) {
-      throw new ToolCallNotFoundForApprovalError({
-        toolCallId: approvalRequest.toolCallId,
-        approvalId: approvalRequest.approvalId
-      });
+      if (allowMissingApprovalContext) {
+        toolCall = {
+          type: "tool-call",
+          toolCallId: approvalRequest.toolCallId,
+          toolName: "unknown",
+          input: {},
+          providerExecuted: true
+        };
+      } else {
+        throw new ToolCallNotFoundForApprovalError({
+          toolCallId: approvalRequest.toolCallId,
+          approvalId: approvalRequest.approvalId
+        });
+      }
     }
     const approval = {
       approvalRequest,
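The hunk above is the behavioral core of this release. When the last `tool` message carries a `tool-approval-response` whose `approvalId` has no matching `tool-approval-request`, or whose tool call is missing from the local history, `collectToolApprovals` used to throw; with `allowMissingApprovalContext` set it now synthesizes placeholder context instead. A minimal sketch of the shapes involved, using only field names visible in the diff plus a hypothetical id:

```ts
// Sketch only: object shapes copied from the hunk above, not a public API surface.
// An approval response whose originating request is not in the local history:
const approvalResponse = {
  type: "tool-approval-response",
  approvalId: "appr_123" // hypothetical id; toolCallId may or may not be present
};

// With allowMissingApprovalContext set, a placeholder request is synthesized
// instead of throwing InvalidToolApprovalError:
const approvalRequest = {
  type: "tool-approval-request",
  approvalId: approvalResponse.approvalId,
  toolCallId: approvalResponse.approvalId // used when approvalResponse.toolCallId is absent
};

// ...and, if the tool call itself is also unknown locally, a placeholder call is
// synthesized instead of throwing ToolCallNotFoundForApprovalError:
const toolCall = {
  type: "tool-call",
  toolCallId: approvalRequest.toolCallId,
  toolName: "unknown",
  input: {},
  providerExecuted: true
};
```

The `toolName: "unknown"` / `input: {}` placeholder is marked `providerExecuted: true`, which signals that the real tool call lives on the provider side rather than in the local message history.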
@@ -3667,10 +3688,13 @@ async function generateText({
     }),
     tracer,
     fn: async (span) => {
-      var _a16, _b, _c, _d, _e, _f, _g, _h;
+      var _a16, _b, _c, _d, _e, _f, _g, _h, _i;
       const initialMessages = initialPrompt.messages;
       const responseMessages = [];
-      const { approvedToolApprovals, deniedToolApprovals } = collectToolApprovals({
+      const { approvedToolApprovals, deniedToolApprovals } = collectToolApprovals({
+        messages: initialMessages,
+        allowMissingApprovalContext: !!((_a16 = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _a16.previousResponseId)
+      });
       const localApprovedToolApprovals = approvedToolApprovals.filter(
         (toolApproval) => !toolApproval.toolCall.providerExecuted
       );
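The new option is not user-facing: `generateText` derives it from `providerOptions.openai.previousResponseId`, i.e. from conversations whose earlier turns (including the original approval request and tool call) are stored provider-side. A hedged usage sketch; only the `providerOptions.openai.previousResponseId` path and the `tool-approval-response` part type are confirmed by this diff, while the rest of the call shape is assumed:

```ts
import { generateText } from "@zenning/ai";

// Assumptions: the generateText option names and message part fields below are
// illustrative; only providerOptions.openai.previousResponseId and the
// "tool-approval-response" part type appear in the diff above.
async function continueAfterApproval(model: any, approvalId: string) {
  return generateText({
    model,
    messages: [
      {
        role: "tool",
        content: [
          {
            type: "tool-approval-response",
            approvalId,    // id issued alongside the provider-side approval request
            approved: true // assumed field name, not shown in this diff
          }
        ]
      }
    ],
    providerOptions: {
      openai: {
        // Earlier turns, including the original tool-approval-request and tool call,
        // live provider-side, so the local history alone cannot resolve the approval.
        previousResponseId: "resp_abc123" // hypothetical id
      }
    }
  });
}
```

In 6.0.22 a call like this would throw `InvalidToolApprovalError`; in 6.0.24 the approval is resolved against the synthesized placeholder context instead.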
@@ -3760,21 +3784,21 @@ async function generateText({
         experimental_context
       }));
       const stepModel = resolveLanguageModel(
-        (
+        (_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
       );
       const promptMessages = await convertToLanguageModelPrompt({
         prompt: {
-          system: (
-          messages: (
+          system: (_c = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _c : initialPrompt.system,
+          messages: (_d = prepareStepResult == null ? void 0 : prepareStepResult.messages) != null ? _d : stepInputMessages
         },
         supportedUrls: await stepModel.supportedUrls,
         download: download2
       });
-      experimental_context = (
+      experimental_context = (_e = prepareStepResult == null ? void 0 : prepareStepResult.experimental_context) != null ? _e : experimental_context;
       const { toolChoice: stepToolChoice, tools: stepTools } = await prepareToolsAndToolChoice({
         tools,
-        toolChoice: (
-        activeTools: (
+        toolChoice: (_f = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _f : toolChoice,
+        activeTools: (_g = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _g : activeTools
       });
       currentModelResponse = await retry(
         () => {
@@ -3994,7 +4018,7 @@ async function generateText({
         usage: asLanguageModelUsage(currentModelResponse.usage),
         warnings: currentModelResponse.warnings,
         providerMetadata: currentModelResponse.providerMetadata,
-        request: (
+        request: (_h = currentModelResponse.request) != null ? _h : {},
         response: {
           ...currentModelResponse.response,
           // deep clone msgs to avoid mutating past messages in multi-step:
@@ -4002,7 +4026,7 @@ async function generateText({
         }
       });
       logWarnings({
-        warnings: (
+        warnings: (_i = currentModelResponse.warnings) != null ? _i : [],
         provider: stepModel.provider,
         model: stepModel.modelId
       });
@@ -6168,6 +6192,7 @@ var DefaultStreamTextResult = class {
       tracer,
       endWhenDone: false,
       fn: async (rootSpanArg) => {
+        var _a16;
         rootSpan = rootSpanArg;
         const initialPrompt = await standardizePrompt({
           system,
@@ -6176,7 +6201,10 @@ var DefaultStreamTextResult = class {
         });
         const initialMessages = initialPrompt.messages;
         const initialResponseMessages = [];
-        const { approvedToolApprovals, deniedToolApprovals } = collectToolApprovals({
+        const { approvedToolApprovals, deniedToolApprovals } = collectToolApprovals({
+          messages: initialMessages,
+          allowMissingApprovalContext: !!((_a16 = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _a16.previousResponseId)
+        });
         if (deniedToolApprovals.length > 0 || approvedToolApprovals.length > 0) {
           const providerExecutedToolApprovals = [
             ...approvedToolApprovals,
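The streaming path receives the same wiring: the hoisted `var _a16;` and the expanded `collectToolApprovals` call above mirror the `generateText` change, so streamed continuations that rely on `openai.previousResponseId` no longer fail on unmatched approval responses. A sketch under the same assumptions as the `generateText` example:

```ts
import { streamText } from "@zenning/ai";

// Same assumptions as the generateText sketch: only the providerOptions path shown
// in the diff is confirmed; the model and message values here are hypothetical.
declare const model: any;
declare const messages: any[]; // e.g. the single "tool" approval-response message from above

const result = streamText({
  model,
  messages,
  providerOptions: { openai: { previousResponseId: "resp_abc123" } } // hypothetical id
});
```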
@@ -6286,7 +6314,7 @@ var DefaultStreamTextResult = class {
         responseMessages,
         usage
       }) {
-        var
+        var _a17, _b, _c, _d, _e, _f;
         const includeRawChunks2 = self.includeRawChunks;
         stepFinish = new import_provider_utils19.DelayedPromise();
         const stepInputMessages = [...initialMessages, ...responseMessages];
@@ -6298,7 +6326,7 @@ var DefaultStreamTextResult = class {
           experimental_context
         }));
         const stepModel = resolveLanguageModel(
-          (
+          (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a17 : model
         );
         const promptMessages = await convertToLanguageModelPrompt({
           prompt: {
@@ -6411,7 +6439,7 @@ var DefaultStreamTextResult = class {
         streamWithToolResults.pipeThrough(
           new TransformStream({
             async transform(chunk, controller) {
-              var
+              var _a18, _b2, _c2, _d2, _e2;
               if (chunk.type === "stream-start") {
                 warnings = chunk.warnings;
                 return;
@@ -6484,7 +6512,7 @@ var DefaultStreamTextResult = class {
               }
               case "response-metadata": {
                 stepResponse = {
-                  id: (
+                  id: (_a18 = chunk.id) != null ? _a18 : stepResponse.id,
                   timestamp: (_b2 = chunk.timestamp) != null ? _b2 : stepResponse.timestamp,
                   modelId: (_c2 = chunk.modelId) != null ? _c2 : stepResponse.modelId
                 };