ai 3.4.3 → 3.4.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +7 -0
- package/dist/index.js +99 -42
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +99 -42
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
package/dist/index.js
CHANGED
@@ -2977,6 +2977,19 @@ function prepareToolsAndToolChoice({
   };
 }
 
+// core/util/split-on-last-whitespace.ts
+var lastWhitespaceRegexp = /^([\s\S]*?)(\s+)(\S*)$/;
+function splitOnLastWhitespace(text) {
+  const match = text.match(lastWhitespaceRegexp);
+  return match ? { prefix: match[1], whitespace: match[2], suffix: match[3] } : void 0;
+}
+
+// core/util/remove-text-after-last-whitespace.ts
+function removeTextAfterLastWhitespace(text) {
+  const match = splitOnLastWhitespace(text);
+  return match ? match.prefix + match.whitespace : text;
+}
+
 // core/generate-text/parse-tool-call.ts
 var import_provider_utils8 = require("@ai-sdk/provider-utils");
 var import_ui_utils4 = require("@ai-sdk/ui-utils");
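The two helpers added above split a string at its last run of whitespace so that a trailing partial word can be held back between steps. A minimal sketch of their behavior, using the regexp exactly as it appears in the diff:

splitOnLastWhitespace("The quick brown fo");
// => { prefix: "The quick brown", whitespace: " ", suffix: "fo" }
removeTextAfterLastWhitespace("The quick brown fo");
// => "The quick brown "
splitOnLastWhitespace("singleword");
// => undefined (no whitespace), so removeTextAfterLastWhitespace returns the input unchanged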
@@ -3097,7 +3110,7 @@ async function generateText({
     }),
     tracer,
     fn: async (span) => {
-      var _a12, _b, _c, _d, _e
+      var _a12, _b, _c, _d, _e;
       const retry = retryWithExponentialBackoff({ maxRetries });
       const validatedPrompt = validatePrompt({
         system,
@@ -3157,7 +3170,7 @@ async function generateText({
         }),
         tracer,
         fn: async (span2) => {
-          var _a13, _b2, _c2, _d2, _e2,
+          var _a13, _b2, _c2, _d2, _e2, _f;
           const result = await model.doGenerate({
             mode,
             ...callSettings,
@@ -3170,7 +3183,7 @@ async function generateText({
           const responseData = {
             id: (_b2 = (_a13 = result.response) == null ? void 0 : _a13.id) != null ? _b2 : generateId3(),
             timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-            modelId: (
+            modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : model.modelId
           };
           span2.setAttributes(
             selectTelemetryAttributes({
@@ -3224,14 +3237,24 @@ async function generateText({
       usage.completionTokens += currentUsage.completionTokens;
       usage.promptTokens += currentUsage.promptTokens;
       usage.totalTokens += currentUsage.totalTokens;
-
-
-
-
+      let nextStepType = "done";
+      if (++stepCount < maxSteps) {
+        if (continueSteps && currentModelResponse.finishReason === "length" && // only use continue when there are no tool calls:
+        currentToolCalls.length === 0) {
+          nextStepType = "continue";
+        } else if (
+          // there are tool calls:
+          currentToolCalls.length > 0 && // all current tool calls have results:
+          currentToolResults.length === currentToolCalls.length
+        ) {
+          nextStepType = "tool-result";
+        }
       }
+      const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace((_b = currentModelResponse.text) != null ? _b : "") : (_c = currentModelResponse.text) != null ? _c : "";
+      text = nextStepType === "continue" || stepType === "continue" ? text + stepText : stepText;
       const currentStep = {
         stepType,
-        text:
+        text: stepText,
         toolCalls: currentToolCalls,
         toolResults: currentToolResults,
         finishReason: currentModelResponse.finishReason,
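The hunk above moves the step-type decision to right after usage accounting, before the step result is assembled (the old branch at the end of the loop is removed further down). Condensed restatement of the same branch, same identifiers, for readability only:

// "done"        -> stepCount has reached maxSteps, or no case below applies
// "continue"    -> continueSteps is on, finishReason === "length", and there are no tool calls
// "tool-result" -> there are tool calls and every one of them already has a result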
@@ -3240,7 +3263,7 @@ async function generateText({
         logprobs: currentModelResponse.logprobs,
         response: {
           ...currentModelResponse.response,
-          headers: (
+          headers: (_d = currentModelResponse.rawResponse) == null ? void 0 : _d.headers
         },
         experimental_providerMetadata: currentModelResponse.providerMetadata
       };
@@ -3253,7 +3276,7 @@ async function generateText({
           lastResponseMessage.content = text;
         } else {
           lastResponseMessage.content.push({
-            text:
+            text: stepText,
             type: "text"
           });
         }
@@ -3261,6 +3284,18 @@ async function generateText({
         promptMessages.push(
           convertToLanguageModelMessage(lastResponseMessage, null)
         );
+      } else if (nextStepType === "continue") {
+        const newResponseMessages = toResponseMessages({
+          text,
+          toolCalls: currentToolCalls,
+          toolResults: currentToolResults
+        });
+        responseMessages.push(...newResponseMessages);
+        promptMessages.push(
+          ...newResponseMessages.map(
+            (message) => convertToLanguageModelMessage(message, null)
+          )
+        );
       } else {
         const newResponseMessages = toResponseMessages({
           text: currentModelResponse.text,
@@ -3274,20 +3309,7 @@ async function generateText({
           )
         );
       }
-
-        stepType = "done";
-      } else if (continueSteps && currentStep.finishReason === "length" && // only use continue when there are no tool calls:
-      currentToolCalls.length === 0) {
-        stepType = "continue";
-      } else if (
-        // there are tool calls:
-        currentToolCalls.length > 0 && // all current tool calls have results:
-        currentToolResults.length === currentToolCalls.length
-      ) {
-        stepType = "tool-result";
-      } else {
-        stepType = "done";
-      }
+      stepType = nextStepType;
     } while (stepType !== "done");
     span.setAttributes(
       selectTelemetryAttributes({
@@ -3322,7 +3344,7 @@ async function generateText({
       warnings: currentModelResponse.warnings,
       response: {
         ...currentModelResponse.response,
-        headers: (
+        headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers
      },
      logprobs: currentModelResponse.logprobs,
      responseMessages,
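Taken together, the generateText changes let a run that stopped with finishReason "length" keep generating in a follow-up step: the trailing partial word is trimmed with removeTextAfterLastWhitespace, the step text is appended to the accumulated text, and the new assistant message is replayed into the prompt for the next step. A hedged usage sketch; the public option name experimental_continueSteps is assumed from the AI SDK 3.x documentation, since only the internal continueSteps flag is visible in this diff:

const { text, steps } = await generateText({
  model,                              // any LanguageModel instance
  maxSteps: 5,                        // allow follow-up steps
  experimental_continueSteps: true,   // assumed public name of the flag
  prompt: "Write a long-form article about..."
});
// steps with stepType "continue" carry the text appended after a "length" stop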
@@ -4011,6 +4033,18 @@ var DefaultStreamTextResult = class {
         timestamp: currentDate(),
         modelId
       };
+      let chunkBuffer = "";
+      let chunkTextPublished = false;
+      async function publishTextChunk({
+        controller,
+        chunk
+      }) {
+        controller.enqueue(chunk);
+        stepText += chunk.textDelta;
+        fullStepText += chunk.textDelta;
+        chunkTextPublished = true;
+        await (onChunk == null ? void 0 : onChunk({ chunk }));
+      }
       addStream(
         stream2.pipeThrough(
           new TransformStream({
@@ -4036,10 +4070,22 @@ var DefaultStreamTextResult = class {
             const chunkType = chunk.type;
             switch (chunkType) {
               case "text-delta": {
-
-
-
-
+                if (continueSteps) {
+                  chunkBuffer += chunk.textDelta;
+                  const split = splitOnLastWhitespace(chunkBuffer);
+                  if (split != null) {
+                    chunkBuffer = split.suffix;
+                    await publishTextChunk({
+                      controller,
+                      chunk: {
+                        type: "text-delta",
+                        textDelta: split.prefix + split.whitespace
+                      }
+                    });
+                  }
+                } else {
+                  await publishTextChunk({ controller, chunk });
+                }
                 break;
               }
               case "tool-call": {
@@ -4095,6 +4141,30 @@ var DefaultStreamTextResult = class {
           // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
           async flush(controller) {
             const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
+            let nextStepType = "done";
+            if (currentStep + 1 < maxSteps) {
+              if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
+              stepToolCalls.length === 0) {
+                nextStepType = "continue";
+              } else if (
+                // there are tool calls:
+                stepToolCalls.length > 0 && // all current tool calls have results:
+                stepToolResults.length === stepToolCalls.length
+              ) {
+                nextStepType = "tool-result";
+              }
+            }
+            if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
+            stepType === "continue" && !chunkTextPublished)) {
+              await publishTextChunk({
+                controller,
+                chunk: {
+                  type: "text-delta",
+                  textDelta: chunkBuffer
+                }
+              });
+              chunkBuffer = "";
+            }
             try {
               doStreamSpan2.setAttributes(
                 selectTelemetryAttributes({
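For the streaming path, the text-delta case above accumulates deltas in chunkBuffer and only forwards text up to the last whitespace; flush() then publishes whatever remains unless the next step is a continue step, mirroring removeTextAfterLastWhitespace in the non-streaming path. A rough trace under those rules, with made-up deltas:

// incoming deltas:        "Hel"    "lo wor"    "ld and more"
// chunkBuffer afterwards: "Hel"    "wor"       "more"
// published text:         (none)   "Hello "    "world and "
// flush(): publishes "more" unless the next step will continue this text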
@@ -4157,19 +4227,6 @@ var DefaultStreamTextResult = class {
              completionTokens: usage.completionTokens + stepUsage.completionTokens,
              totalTokens: usage.totalTokens + stepUsage.totalTokens
            };
-            let nextStepType = "done";
-            if (currentStep + 1 < maxSteps) {
-              if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
-              stepToolCalls.length === 0) {
-                nextStepType = "continue";
-              } else if (
-                // there are tool calls:
-                stepToolCalls.length > 0 && // all current tool calls have results:
-                stepToolResults.length === stepToolCalls.length
-              ) {
-                nextStepType = "tool-result";
-              }
-            }
            if (nextStepType !== "done") {
              if (stepType === "continue") {
                const lastPromptMessage = promptMessages2[promptMessages2.length - 1];