@zenning/ai 6.0.13 → 6.0.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +21 -0
- package/dist/index.js +39 -26
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +39 -26
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +2 -3
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +2 -3
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +5 -5
package/dist/index.mjs
CHANGED
|
@@ -898,7 +898,7 @@ import {
|
|
|
898
898
|
} from "@zenning/provider-utils";
|
|
899
899
|
|
|
900
900
|
// src/version.ts
|
|
901
|
-
var VERSION = true ? "6.0.13" : "0.0.0-test";
|
|
901
|
+
var VERSION = true ? "6.0.14" : "0.0.0-test";
|
|
902
902
|
|
|
903
903
|
// src/util/download/download.ts
|
|
904
904
|
var download = async ({ url }) => {
|
|
@@ -1702,8 +1702,7 @@ var assistantModelMessageSchema = z5.object({
|
|
|
1702
1702
|
reasoningPartSchema,
|
|
1703
1703
|
toolCallPartSchema,
|
|
1704
1704
|
toolResultPartSchema,
|
|
1705
|
-
toolApprovalRequestSchema
|
|
1706
|
-
compactionPartSchema
|
|
1705
|
+
toolApprovalRequestSchema
|
|
1707
1706
|
])
|
|
1708
1707
|
)
|
|
1709
1708
|
]),
|
|
@@ -2285,7 +2284,8 @@ function prepareRetries({
|
|
|
2285
2284
|
|
|
2286
2285
|
// src/generate-text/collect-tool-approvals.ts
|
|
2287
2286
|
function collectToolApprovals({
|
|
2288
|
-
messages
|
|
2287
|
+
messages,
|
|
2288
|
+
skipValidation = false
|
|
2289
2289
|
}) {
|
|
2290
2290
|
const lastMessage = messages.at(-1);
|
|
2291
2291
|
if ((lastMessage == null ? void 0 : lastMessage.role) != "tool") {
|
|
@@ -2330,19 +2330,25 @@ function collectToolApprovals({
|
|
|
2330
2330
|
for (const approvalResponse of approvalResponses) {
|
|
2331
2331
|
const approvalRequest = toolApprovalRequestsByApprovalId[approvalResponse.approvalId];
|
|
2332
2332
|
if (approvalRequest == null) {
|
|
2333
|
-
throw new InvalidToolApprovalError({
|
|
2334
|
-
approvalId: approvalResponse.approvalId
|
|
2335
|
-
});
|
|
2333
|
+
if (!skipValidation) {
|
|
2334
|
+
throw new InvalidToolApprovalError({
|
|
2335
|
+
approvalId: approvalResponse.approvalId
|
|
2336
|
+
});
|
|
2337
|
+
}
|
|
2338
|
+
continue;
|
|
2336
2339
|
}
|
|
2337
2340
|
if (toolResults[approvalRequest.toolCallId] != null) {
|
|
2338
2341
|
continue;
|
|
2339
2342
|
}
|
|
2340
2343
|
const toolCall = toolCallsByToolCallId[approvalRequest.toolCallId];
|
|
2341
2344
|
if (toolCall == null) {
|
|
2342
|
-
throw new ToolCallNotFoundForApprovalError({
|
|
2343
|
-
toolCallId: approvalRequest.toolCallId,
|
|
2344
|
-
approvalId: approvalRequest.approvalId
|
|
2345
|
-
});
|
|
2345
|
+
if (!skipValidation) {
|
|
2346
|
+
throw new ToolCallNotFoundForApprovalError({
|
|
2347
|
+
toolCallId: approvalRequest.toolCallId,
|
|
2348
|
+
approvalId: approvalRequest.approvalId
|
|
2349
|
+
});
|
|
2350
|
+
}
|
|
2351
|
+
continue;
|
|
2346
2352
|
}
|
|
2347
2353
|
const approval = {
|
|
2348
2354
|
approvalRequest,
|
|
@@ -3584,10 +3590,13 @@ async function generateText({
|
|
|
3584
3590
|
}),
|
|
3585
3591
|
tracer,
|
|
3586
3592
|
fn: async (span) => {
|
|
3587
|
-
var _a16, _b, _c, _d, _e, _f, _g, _h;
|
|
3593
|
+
var _a16, _b, _c, _d, _e, _f, _g, _h, _i;
|
|
3588
3594
|
const initialMessages = initialPrompt.messages;
|
|
3589
3595
|
const responseMessages = [];
|
|
3590
|
-
const { approvedToolApprovals, deniedToolApprovals } = collectToolApprovals({ messages: initialMessages });
|
|
3596
|
+
const { approvedToolApprovals, deniedToolApprovals } = collectToolApprovals({
|
|
3597
|
+
messages: initialMessages,
|
|
3598
|
+
skipValidation: ((_a16 = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _a16.previousResponseId) != null
|
|
3599
|
+
});
|
|
3591
3600
|
const localApprovedToolApprovals = approvedToolApprovals.filter(
|
|
3592
3601
|
(toolApproval) => !toolApproval.toolCall.providerExecuted
|
|
3593
3602
|
);
|
|
@@ -3677,21 +3686,21 @@ async function generateText({
|
|
|
3677
3686
|
experimental_context
|
|
3678
3687
|
}));
|
|
3679
3688
|
const stepModel = resolveLanguageModel(
|
|
3680
|
-
(_a16 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a16 : model
|
|
3689
|
+
(_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
|
|
3681
3690
|
);
|
|
3682
3691
|
const promptMessages = await convertToLanguageModelPrompt({
|
|
3683
3692
|
prompt: {
|
|
3684
|
-
system: (_b = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _b : initialPrompt.system,
|
|
3685
|
-
messages: (_c = prepareStepResult == null ? void 0 : prepareStepResult.messages) != null ? _c : stepInputMessages
|
|
3693
|
+
system: (_c = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _c : initialPrompt.system,
|
|
3694
|
+
messages: (_d = prepareStepResult == null ? void 0 : prepareStepResult.messages) != null ? _d : stepInputMessages
|
|
3686
3695
|
},
|
|
3687
3696
|
supportedUrls: await stepModel.supportedUrls,
|
|
3688
3697
|
download: download2
|
|
3689
3698
|
});
|
|
3690
|
-
experimental_context = (_d = prepareStepResult == null ? void 0 : prepareStepResult.experimental_context) != null ? _d : experimental_context;
|
|
3699
|
+
experimental_context = (_e = prepareStepResult == null ? void 0 : prepareStepResult.experimental_context) != null ? _e : experimental_context;
|
|
3691
3700
|
const { toolChoice: stepToolChoice, tools: stepTools } = await prepareToolsAndToolChoice({
|
|
3692
3701
|
tools,
|
|
3693
|
-
toolChoice: (_e = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _e : toolChoice,
|
|
3694
|
-
activeTools: (_f = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _f : activeTools
|
|
3702
|
+
toolChoice: (_f = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _f : toolChoice,
|
|
3703
|
+
activeTools: (_g = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _g : activeTools
|
|
3695
3704
|
});
|
|
3696
3705
|
currentModelResponse = await retry(
|
|
3697
3706
|
() => {
|
|
@@ -3911,7 +3920,7 @@ async function generateText({
|
|
|
3911
3920
|
usage: asLanguageModelUsage(currentModelResponse.usage),
|
|
3912
3921
|
warnings: currentModelResponse.warnings,
|
|
3913
3922
|
providerMetadata: currentModelResponse.providerMetadata,
|
|
3914
|
-
request: (_g = currentModelResponse.request) != null ? _g : {},
|
|
3923
|
+
request: (_h = currentModelResponse.request) != null ? _h : {},
|
|
3915
3924
|
response: {
|
|
3916
3925
|
...currentModelResponse.response,
|
|
3917
3926
|
// deep clone msgs to avoid mutating past messages in multi-step:
|
|
@@ -3919,7 +3928,7 @@ async function generateText({
|
|
|
3919
3928
|
}
|
|
3920
3929
|
});
|
|
3921
3930
|
logWarnings({
|
|
3922
|
-
warnings: (_h = currentModelResponse.warnings) != null ? _h : [],
|
|
3931
|
+
warnings: (_i = currentModelResponse.warnings) != null ? _i : [],
|
|
3923
3932
|
provider: stepModel.provider,
|
|
3924
3933
|
model: stepModel.modelId
|
|
3925
3934
|
});
|
|
@@ -6093,6 +6102,7 @@ var DefaultStreamTextResult = class {
|
|
|
6093
6102
|
tracer,
|
|
6094
6103
|
endWhenDone: false,
|
|
6095
6104
|
fn: async (rootSpanArg) => {
|
|
6105
|
+
var _a16;
|
|
6096
6106
|
rootSpan = rootSpanArg;
|
|
6097
6107
|
const initialPrompt = await standardizePrompt({
|
|
6098
6108
|
system,
|
|
@@ -6101,7 +6111,10 @@ var DefaultStreamTextResult = class {
|
|
|
6101
6111
|
});
|
|
6102
6112
|
const initialMessages = initialPrompt.messages;
|
|
6103
6113
|
const initialResponseMessages = [];
|
|
6104
|
-
const { approvedToolApprovals, deniedToolApprovals } = collectToolApprovals({ messages: initialMessages });
|
|
6114
|
+
const { approvedToolApprovals, deniedToolApprovals } = collectToolApprovals({
|
|
6115
|
+
messages: initialMessages,
|
|
6116
|
+
skipValidation: ((_a16 = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _a16.previousResponseId) != null
|
|
6117
|
+
});
|
|
6105
6118
|
if (deniedToolApprovals.length > 0 || approvedToolApprovals.length > 0) {
|
|
6106
6119
|
const providerExecutedToolApprovals = [
|
|
6107
6120
|
...approvedToolApprovals,
|
|
@@ -6211,7 +6224,7 @@ var DefaultStreamTextResult = class {
|
|
|
6211
6224
|
responseMessages,
|
|
6212
6225
|
usage
|
|
6213
6226
|
}) {
|
|
6214
|
-
var _a16, _b, _c, _d, _e, _f;
|
|
6227
|
+
var _a17, _b, _c, _d, _e, _f;
|
|
6215
6228
|
const includeRawChunks2 = self.includeRawChunks;
|
|
6216
6229
|
stepFinish = new DelayedPromise();
|
|
6217
6230
|
const stepInputMessages = [...initialMessages, ...responseMessages];
|
|
@@ -6223,7 +6236,7 @@ var DefaultStreamTextResult = class {
|
|
|
6223
6236
|
experimental_context
|
|
6224
6237
|
}));
|
|
6225
6238
|
const stepModel = resolveLanguageModel(
|
|
6226
|
-
(_a16 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a16 : model
|
|
6239
|
+
(_a17 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a17 : model
|
|
6227
6240
|
);
|
|
6228
6241
|
const promptMessages = await convertToLanguageModelPrompt({
|
|
6229
6242
|
prompt: {
|
|
@@ -6336,7 +6349,7 @@ var DefaultStreamTextResult = class {
|
|
|
6336
6349
|
streamWithToolResults.pipeThrough(
|
|
6337
6350
|
new TransformStream({
|
|
6338
6351
|
async transform(chunk, controller) {
|
|
6339
|
-
var _a17, _b2, _c2, _d2, _e2;
|
|
6352
|
+
var _a18, _b2, _c2, _d2, _e2;
|
|
6340
6353
|
if (chunk.type === "stream-start") {
|
|
6341
6354
|
warnings = chunk.warnings;
|
|
6342
6355
|
return;
|
|
@@ -6409,7 +6422,7 @@ var DefaultStreamTextResult = class {
|
|
|
6409
6422
|
}
|
|
6410
6423
|
case "response-metadata": {
|
|
6411
6424
|
stepResponse = {
|
|
6412
|
-
id: (_a17 = chunk.id) != null ? _a17 : stepResponse.id,
|
|
6425
|
+
id: (_a18 = chunk.id) != null ? _a18 : stepResponse.id,
|
|
6413
6426
|
timestamp: (_b2 = chunk.timestamp) != null ? _b2 : stepResponse.timestamp,
|
|
6414
6427
|
modelId: (_c2 = chunk.modelId) != null ? _c2 : stepResponse.modelId
|
|
6415
6428
|
};
|