ai 3.4.3 → 3.4.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +19 -0
- package/dist/index.d.mts +6 -1
- package/dist/index.d.ts +6 -1
- package/dist/index.js +107 -46
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +107 -46
- package/dist/index.mjs.map +1 -1
- package/package.json +6 -6
package/dist/index.mjs
CHANGED
@@ -2915,6 +2915,19 @@ function prepareToolsAndToolChoice({
   };
 }
 
+// core/util/split-on-last-whitespace.ts
+var lastWhitespaceRegexp = /^([\s\S]*?)(\s+)(\S*)$/;
+function splitOnLastWhitespace(text) {
+  const match = text.match(lastWhitespaceRegexp);
+  return match ? { prefix: match[1], whitespace: match[2], suffix: match[3] } : void 0;
+}
+
+// core/util/remove-text-after-last-whitespace.ts
+function removeTextAfterLastWhitespace(text) {
+  const match = splitOnLastWhitespace(text);
+  return match ? match.prefix + match.whitespace : text;
+}
+
 // core/generate-text/parse-tool-call.ts
 import { safeParseJSON as safeParseJSON2, safeValidateTypes as safeValidateTypes3 } from "@ai-sdk/provider-utils";
 import { asSchema as asSchema3 } from "@ai-sdk/ui-utils";
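Note: the two helpers added above split a string at its last run of whitespace; removeTextAfterLastWhitespace is used further down to trim a trailing partial word from a length-truncated step before a "continue" step resumes it. A minimal sketch of their behavior (example inputs are illustrative, not taken from the package):

    splitOnLastWhitespace("The quick brown");
    // -> { prefix: "The quick", whitespace: " ", suffix: "brown" }
    splitOnLastWhitespace("nowhitespace");
    // -> undefined (no whitespace to split on)
    removeTextAfterLastWhitespace("The quick brown");
    // -> "The quick " (drops the trailing partial word, keeps the whitespace)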
@@ -3035,7 +3048,7 @@ async function generateText({
     }),
     tracer,
     fn: async (span) => {
-      var _a12, _b, _c, _d, _e
+      var _a12, _b, _c, _d, _e;
       const retry = retryWithExponentialBackoff({ maxRetries });
       const validatedPrompt = validatePrompt({
         system,
@@ -3095,7 +3108,7 @@ async function generateText({
         }),
         tracer,
         fn: async (span2) => {
-          var _a13, _b2, _c2, _d2, _e2,
+          var _a13, _b2, _c2, _d2, _e2, _f;
           const result = await model.doGenerate({
             mode,
             ...callSettings,
@@ -3108,7 +3121,7 @@ async function generateText({
           const responseData = {
             id: (_b2 = (_a13 = result.response) == null ? void 0 : _a13.id) != null ? _b2 : generateId3(),
             timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-            modelId: (
+            modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : model.modelId
           };
           span2.setAttributes(
             selectTelemetryAttributes({
@@ -3162,14 +3175,24 @@ async function generateText({
       usage.completionTokens += currentUsage.completionTokens;
       usage.promptTokens += currentUsage.promptTokens;
       usage.totalTokens += currentUsage.totalTokens;
-
-
-
-
+      let nextStepType = "done";
+      if (++stepCount < maxSteps) {
+        if (continueSteps && currentModelResponse.finishReason === "length" && // only use continue when there are no tool calls:
+        currentToolCalls.length === 0) {
+          nextStepType = "continue";
+        } else if (
+          // there are tool calls:
+          currentToolCalls.length > 0 && // all current tool calls have results:
+          currentToolResults.length === currentToolCalls.length
+        ) {
+          nextStepType = "tool-result";
+        }
       }
+      const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace((_b = currentModelResponse.text) != null ? _b : "") : (_c = currentModelResponse.text) != null ? _c : "";
+      text = nextStepType === "continue" || stepType === "continue" ? text + stepText : stepText;
       const currentStep = {
         stepType,
-        text:
+        text: stepText,
         toolCalls: currentToolCalls,
         toolResults: currentToolResults,
         finishReason: currentModelResponse.finishReason,
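Note: the block added above decides the type of the next step before the step result object is built. Restated as a standalone helper for readability (decideNextStepType is a hypothetical name, not part of the package; in the bundled code the stepCount increment happens inline in the condition):

    function decideNextStepType({ stepCount, maxSteps, continueSteps, finishReason, toolCalls, toolResults }) {
      if (stepCount >= maxSteps) return "done";
      // continue only when the step was cut off by length and made no tool calls:
      if (continueSteps && finishReason === "length" && toolCalls.length === 0) return "continue";
      // tool-result only when every tool call in the step already has a result:
      if (toolCalls.length > 0 && toolResults.length === toolCalls.length) return "tool-result";
      return "done";
    }

When the next step is a continuation, the step text is trimmed with removeTextAfterLastWhitespace and appended to the accumulated text rather than replacing it.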
@@ -3178,9 +3201,10 @@ async function generateText({
         logprobs: currentModelResponse.logprobs,
         response: {
           ...currentModelResponse.response,
-          headers: (
+          headers: (_d = currentModelResponse.rawResponse) == null ? void 0 : _d.headers
         },
-        experimental_providerMetadata: currentModelResponse.providerMetadata
+        experimental_providerMetadata: currentModelResponse.providerMetadata,
+        isContinued: nextStepType === "continue"
       };
       steps.push(currentStep);
       await (onStepFinish == null ? void 0 : onStepFinish(currentStep));
@@ -3191,7 +3215,7 @@ async function generateText({
           lastResponseMessage.content = text;
         } else {
           lastResponseMessage.content.push({
-            text:
+            text: stepText,
             type: "text"
           });
         }
@@ -3199,6 +3223,18 @@ async function generateText({
         promptMessages.push(
           convertToLanguageModelMessage(lastResponseMessage, null)
         );
+      } else if (nextStepType === "continue") {
+        const newResponseMessages = toResponseMessages({
+          text,
+          toolCalls: currentToolCalls,
+          toolResults: currentToolResults
+        });
+        responseMessages.push(...newResponseMessages);
+        promptMessages.push(
+          ...newResponseMessages.map(
+            (message) => convertToLanguageModelMessage(message, null)
+          )
+        );
       } else {
         const newResponseMessages = toResponseMessages({
           text: currentModelResponse.text,
@@ -3212,20 +3248,7 @@ async function generateText({
           )
         );
       }
-
-        stepType = "done";
-      } else if (continueSteps && currentStep.finishReason === "length" && // only use continue when there are no tool calls:
-      currentToolCalls.length === 0) {
-        stepType = "continue";
-      } else if (
-        // there are tool calls:
-        currentToolCalls.length > 0 && // all current tool calls have results:
-        currentToolResults.length === currentToolCalls.length
-      ) {
-        stepType = "tool-result";
-      } else {
-        stepType = "done";
-      }
+      stepType = nextStepType;
     } while (stepType !== "done");
     span.setAttributes(
       selectTelemetryAttributes({
@@ -3260,7 +3283,7 @@ async function generateText({
       warnings: currentModelResponse.warnings,
       response: {
         ...currentModelResponse.response,
-        headers: (
+        headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers
       },
       logprobs: currentModelResponse.logprobs,
       responseMessages,
@@ -3949,6 +3972,18 @@ var DefaultStreamTextResult = class {
      timestamp: currentDate(),
      modelId
    };
+   let chunkBuffer = "";
+   let chunkTextPublished = false;
+   async function publishTextChunk({
+     controller,
+     chunk
+   }) {
+     controller.enqueue(chunk);
+     stepText += chunk.textDelta;
+     fullStepText += chunk.textDelta;
+     chunkTextPublished = true;
+     await (onChunk == null ? void 0 : onChunk({ chunk }));
+   }
    addStream(
      stream2.pipeThrough(
        new TransformStream({
@@ -3974,10 +4009,22 @@ var DefaultStreamTextResult = class {
           const chunkType = chunk.type;
           switch (chunkType) {
             case "text-delta": {
-
-
-
-
+              if (continueSteps) {
+                chunkBuffer += chunk.textDelta;
+                const split = splitOnLastWhitespace(chunkBuffer);
+                if (split != null) {
+                  chunkBuffer = split.suffix;
+                  await publishTextChunk({
+                    controller,
+                    chunk: {
+                      type: "text-delta",
+                      textDelta: split.prefix + split.whitespace
+                    }
+                  });
+                }
+              } else {
+                await publishTextChunk({ controller, chunk });
+              }
               break;
             }
             case "tool-call": {
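Note: with continueSteps enabled, text deltas are now re-chunked at whitespace boundaries, so a trailing partial word stays in chunkBuffer until more text arrives (or until the flush handler below). A self-contained simulation of that buffering, reusing the regexp added earlier (the input deltas are illustrative, not from the package):

    const lastWhitespaceRegexp = /^([\s\S]*?)(\s+)(\S*)$/;
    function splitOnLastWhitespace(text) {
      const match = text.match(lastWhitespaceRegexp);
      return match ? { prefix: match[1], whitespace: match[2], suffix: match[3] } : undefined;
    }
    let buffer = "";
    const published = [];
    for (const delta of ["Hello wor", "ld and more"]) {
      buffer += delta;
      const split = splitOnLastWhitespace(buffer);
      if (split != null) {
        buffer = split.suffix; // hold back the trailing partial word
        published.push(split.prefix + split.whitespace);
      }
    }
    // published: ["Hello ", "world and "]; buffer: "more" (emitted later by flush)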
@@ -4033,6 +4080,30 @@ var DefaultStreamTextResult = class {
          // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
          async flush(controller) {
            const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
+           let nextStepType = "done";
+           if (currentStep + 1 < maxSteps) {
+             if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
+             stepToolCalls.length === 0) {
+               nextStepType = "continue";
+             } else if (
+               // there are tool calls:
+               stepToolCalls.length > 0 && // all current tool calls have results:
+               stepToolResults.length === stepToolCalls.length
+             ) {
+               nextStepType = "tool-result";
+             }
+           }
+           if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
+           stepType === "continue" && !chunkTextPublished)) {
+             await publishTextChunk({
+               controller,
+               chunk: {
+                 type: "text-delta",
+                 textDelta: chunkBuffer
+               }
+             });
+             chunkBuffer = "";
+           }
            try {
              doStreamSpan2.setAttributes(
                selectTelemetryAttributes({
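Note: the flush logic added above only emits the leftover chunkBuffer when the step will not be continued, or when a continued step never published any text. The condition, restated as a standalone predicate (shouldFlushChunkBuffer is a hypothetical name used only for illustration):

    function shouldFlushChunkBuffer({ continueSteps, chunkBuffer, nextStepType, stepType, chunkTextPublished }) {
      return (
        continueSteps && chunkBuffer.length > 0 &&
        // publish when the next step is a regular step, or when a "continue"
        // step produced nothing that could be split off at a whitespace boundary:
        (nextStepType !== "continue" || (stepType === "continue" && !chunkTextPublished))
      );
    }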
@@ -4073,7 +4144,8 @@ var DefaultStreamTextResult = class {
             usage: stepUsage,
             experimental_providerMetadata: stepProviderMetadata,
             logprobs: stepLogProbs,
-            response: stepResponse
+            response: stepResponse,
+            isContinued: nextStepType === "continue"
           });
           const stepResult = {
             stepType,
@@ -4086,7 +4158,8 @@ var DefaultStreamTextResult = class {
             logprobs: stepLogProbs,
             response: stepResponse,
             rawResponse: self.rawResponse,
-            experimental_providerMetadata: stepProviderMetadata
+            experimental_providerMetadata: stepProviderMetadata,
+            isContinued: nextStepType === "continue"
           };
           stepResults.push(stepResult);
           await (onStepFinish == null ? void 0 : onStepFinish(stepResult));
@@ -4095,19 +4168,6 @@ var DefaultStreamTextResult = class {
             completionTokens: usage.completionTokens + stepUsage.completionTokens,
             totalTokens: usage.totalTokens + stepUsage.totalTokens
           };
-          let nextStepType = "done";
-          if (currentStep + 1 < maxSteps) {
-            if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
-            stepToolCalls.length === 0) {
-              nextStepType = "continue";
-            } else if (
-              // there are tool calls:
-              stepToolCalls.length > 0 && // all current tool calls have results:
-              stepToolResults.length === stepToolCalls.length
-            ) {
-              nextStepType = "tool-result";
-            }
-          }
           if (nextStepType !== "done") {
             if (stepType === "continue") {
               const lastPromptMessage = promptMessages2[promptMessages2.length - 1];
@@ -4375,7 +4435,8 @@ var DefaultStreamTextResult = class {
              usage: sendUsage ? {
                promptTokens: chunk.usage.promptTokens,
                completionTokens: chunk.usage.completionTokens
-             } : void 0
+             } : void 0,
+             isContinued: chunk.isContinued
            })
          );
          break;