ai 3.4.1 → 3.4.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +13 -0
- package/dist/index.d.mts +26 -4
- package/dist/index.d.ts +26 -4
- package/dist/index.js +124 -53
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +124 -53
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.mjs
CHANGED
@@ -2990,6 +2990,8 @@ async function generateText({
   maxAutomaticRoundtrips = 0,
   maxToolRoundtrips = maxAutomaticRoundtrips,
   maxSteps = maxToolRoundtrips != null ? maxToolRoundtrips + 1 : 1,
+  experimental_continuationSteps,
+  experimental_continueSteps: continueSteps = experimental_continuationSteps != null ? experimental_continuationSteps : false,
   experimental_telemetry: telemetry,
   experimental_providerMetadata: providerMetadata,
   _internal: {
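The two added options wire continuation steps into `generateText`: `experimental_continueSteps` is the flag going forward, with `experimental_continuationSteps` kept as a fallback alias for the earlier option name. A minimal usage sketch (provider, model id, and prompt are placeholders, not part of this diff):

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai"; // placeholder provider

const { text, steps } = await generateText({
  model: openai("gpt-4o"), // placeholder model id
  maxSteps: 5, // upper bound on initial + continuation/tool steps
  experimental_continueSteps: true, // resume generation when a step stops at the length limit
  prompt: "Write a detailed multi-part essay about package registries.",
});

// with continueSteps enabled, `text` is the concatenated text of all continuation steps
console.log(text.length, steps.length);
```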
@@ -3033,7 +3035,7 @@ async function generateText({
     }),
     tracer,
     fn: async (span) => {
-      var _a12, _b, _c, _d, _e;
+      var _a12, _b, _c, _d, _e, _f, _g;
       const retry = retryWithExponentialBackoff({ maxRetries });
       const validatedPrompt = validatePrompt({
         system,
@@ -3054,12 +3056,14 @@ async function generateText({
       let currentToolResults = [];
       let stepCount = 0;
       const responseMessages = [];
+      let text = "";
       const steps = [];
       const usage = {
         completionTokens: 0,
         promptTokens: 0,
         totalTokens: 0
       };
+      let stepType = "initial";
       do {
         const currentInputFormat = stepCount === 0 ? validatedPrompt.type : "messages";
         currentModelResponse = await retry(
@@ -3091,7 +3095,7 @@ async function generateText({
           }),
           tracer,
           fn: async (span2) => {
-            var _a13, _b2, _c2, _d2, _e2,
+            var _a13, _b2, _c2, _d2, _e2, _f2;
             const result = await model.doGenerate({
               mode,
               ...callSettings,
@@ -3104,7 +3108,7 @@ async function generateText({
             const responseData = {
               id: (_b2 = (_a13 = result.response) == null ? void 0 : _a13.id) != null ? _b2 : generateId3(),
               timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-              modelId: (
+              modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId
             };
             span2.setAttributes(
               selectTelemetryAttributes({
@@ -3158,8 +3162,14 @@ async function generateText({
         usage.completionTokens += currentUsage.completionTokens;
         usage.promptTokens += currentUsage.promptTokens;
         usage.totalTokens += currentUsage.totalTokens;
+        if (stepType === "continue") {
+          text += (_b = currentModelResponse.text) != null ? _b : "";
+        } else {
+          text = (_c = currentModelResponse.text) != null ? _c : "";
+        }
         const currentStep = {
-
+          stepType,
+          text: (_d = currentModelResponse.text) != null ? _d : "",
           toolCalls: currentToolCalls,
           toolResults: currentToolResults,
           finishReason: currentModelResponse.finishReason,
@@ -3168,29 +3178,55 @@ async function generateText({
           logprobs: currentModelResponse.logprobs,
           response: {
             ...currentModelResponse.response,
-            headers: (
+            headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers
           },
           experimental_providerMetadata: currentModelResponse.providerMetadata
         };
         steps.push(currentStep);
         await (onStepFinish == null ? void 0 : onStepFinish(currentStep));
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if (stepType === "continue") {
+          const lastResponseMessage = responseMessages.pop();
+          promptMessages.pop();
+          if (typeof lastResponseMessage.content === "string") {
+            lastResponseMessage.content = text;
+          } else {
+            lastResponseMessage.content.push({
+              text: (_f = currentModelResponse.text) != null ? _f : "",
+              type: "text"
+            });
+          }
+          responseMessages.push(lastResponseMessage);
+          promptMessages.push(
+            convertToLanguageModelMessage(lastResponseMessage, null)
+          );
+        } else {
+          const newResponseMessages = toResponseMessages({
+            text: currentModelResponse.text,
+            toolCalls: currentToolCalls,
+            toolResults: currentToolResults
+          });
+          responseMessages.push(...newResponseMessages);
+          promptMessages.push(
+            ...newResponseMessages.map(
+              (message) => convertToLanguageModelMessage(message, null)
+            )
+          );
+        }
+        if (++stepCount >= maxSteps) {
+          stepType = "done";
+        } else if (continueSteps && currentStep.finishReason === "length" && // only use continue when there are no tool calls:
+        currentToolCalls.length === 0) {
+          stepType = "continue";
+        } else if (
+          // there are tool calls:
+          currentToolCalls.length > 0 && // all current tool calls have results:
+          currentToolResults.length === currentToolCalls.length
+        ) {
+          stepType = "tool-result";
+        } else {
+          stepType = "done";
+        }
+      } while (stepType !== "done");
       span.setAttributes(
         selectTelemetryAttributes({
           telemetry,
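The do/while loop in `generateText` is now driven by a small step-type state machine instead of the old roundtrip counter. A paraphrase of the transition rule above, pulled into a standalone function for clarity (the function name and parameter bundle are illustrative, not part of the package):

```ts
type StepType = "initial" | "continue" | "tool-result" | "done";

// Paraphrase of the transition logic in the loop above. `stepCount` is the
// value after the step that just finished has been counted.
function nextStepType(opts: {
  stepCount: number;
  maxSteps: number;
  continueSteps: boolean;
  finishReason: string;
  toolCallCount: number;
  toolResultCount: number;
}): StepType {
  if (opts.stepCount >= opts.maxSteps) {
    return "done"; // step budget exhausted
  }
  if (opts.continueSteps && opts.finishReason === "length" && opts.toolCallCount === 0) {
    return "continue"; // the model hit its output limit mid-text; let it resume
  }
  if (opts.toolCallCount > 0 && opts.toolCallCount === opts.toolResultCount) {
    return "tool-result"; // every tool call has a result; feed them back
  }
  return "done";
}
```

A "continue" step then folds its text into the last assistant message (replacing string content or appending a text part) rather than emitting a new message, which is why the loop above pops and re-pushes `responseMessages` and `promptMessages`.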
@@ -3216,10 +3252,7 @@ async function generateText({
       })
     );
     return new DefaultGenerateTextResult({
-      // Always return a string so that the caller doesn't have to check for undefined.
-      // If they need to check if the model did not return any text,
-      // they can check the length of the string:
-      text: (_d = currentModelResponse.text) != null ? _d : "",
+      text,
       toolCalls: currentToolCalls,
       toolResults: currentToolResults,
       finishReason: currentModelResponse.finishReason,
@@ -3227,7 +3260,7 @@ async function generateText({
       warnings: currentModelResponse.warnings,
       response: {
         ...currentModelResponse.response,
-        headers: (
+        headers: (_g = currentModelResponse.rawResponse) == null ? void 0 : _g.headers
       },
       logprobs: currentModelResponse.logprobs,
       responseMessages,
@@ -3673,6 +3706,7 @@ async function streamText({
   headers,
   maxToolRoundtrips = 0,
   maxSteps = maxToolRoundtrips != null ? maxToolRoundtrips + 1 : 1,
+  experimental_continueSteps: continueSteps = false,
   experimental_telemetry: telemetry,
   experimental_providerMetadata: providerMetadata,
   experimental_toolCallStreaming: toolCallStreaming = false,
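`streamText` gains the same flag, here without the legacy `experimental_continuationSteps` alias. A usage sketch under the same placeholder assumptions as the `generateText` example above:

```ts
import { streamText } from "ai";
import { openai } from "@ai-sdk/openai"; // placeholder provider

const result = await streamText({
  model: openai("gpt-4o"), // placeholder model id
  maxSteps: 3,
  experimental_continueSteps: true,
  prompt: "Summarize the history of JavaScript module bundlers in detail.",
});

// continuation steps arrive as ordinary text deltas on the same stream;
// the `text` promise resolves with the full concatenated text once done.
for await (const delta of result.textStream) {
  process.stdout.write(delta);
}
console.log(await result.text);
```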
@@ -3817,6 +3851,7 @@ async function streamText({
     telemetry,
     startTimestampMs,
     maxSteps,
+    continueSteps,
     startStep,
     promptMessages,
     modelId: model.modelId,
@@ -3840,6 +3875,7 @@ var DefaultStreamTextResult = class {
     telemetry,
     startTimestampMs,
     maxSteps,
+    continueSteps,
     startStep,
     promptMessages,
     modelId,
@@ -3891,7 +3927,9 @@ var DefaultStreamTextResult = class {
        promptTokens: 0,
        completionTokens: 0,
        totalTokens: 0
-      }
+      },
+      stepType,
+      previousStepText = ""
     }) {
       const stepToolCalls = [];
       const stepToolResults = [];
@@ -3904,6 +3942,7 @@ var DefaultStreamTextResult = class {
       let stepProviderMetadata;
       let stepFirstChunk = true;
       let stepText = "";
+      let fullStepText = stepType === "continue" ? previousStepText : "";
       let stepLogProbs;
       let stepResponse = {
         id: generateId3(),
@@ -3937,6 +3976,7 @@ var DefaultStreamTextResult = class {
           case "text-delta": {
             controller.enqueue(chunk);
             stepText += chunk.textDelta;
+            fullStepText += chunk.textDelta;
             await (onChunk == null ? void 0 : onChunk({ chunk }));
             break;
           }
@@ -4036,6 +4076,7 @@ var DefaultStreamTextResult = class {
           response: stepResponse
         });
         const stepResult = {
+          stepType,
           text: stepText,
           toolCalls: stepToolCalls,
           toolResults: stepToolResults,
@@ -4054,21 +4095,37 @@ var DefaultStreamTextResult = class {
           completionTokens: usage.completionTokens + stepUsage.completionTokens,
           totalTokens: usage.totalTokens + stepUsage.totalTokens
         };
-
-
-
-
-
-
-
-
+        let nextStepType = "done";
+        if (currentStep + 1 < maxSteps) {
+          if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
+          stepToolCalls.length === 0) {
+            nextStepType = "continue";
+          } else if (
+            // there are tool calls:
+            stepToolCalls.length > 0 && // all current tool calls have results:
+            stepToolResults.length === stepToolCalls.length
+          ) {
+            nextStepType = "tool-result";
+          }
+        }
+        if (nextStepType !== "done") {
+          if (stepType === "continue") {
+            const lastPromptMessage = promptMessages2[promptMessages2.length - 1];
+            lastPromptMessage.content.push({
               text: stepText,
-
-
-
-
-
-
+              type: "text"
+            });
+          } else {
+            promptMessages2.push(
+              ...toResponseMessages({
+                text: stepText,
+                toolCalls: stepToolCalls,
+                toolResults: stepToolResults
+              }).map(
+                (message) => convertToLanguageModelMessage(message, null)
+              )
+            );
+          }
           const {
             result,
             doStreamSpan: doStreamSpan3,
@@ -4085,7 +4142,9 @@ var DefaultStreamTextResult = class {
             doStreamSpan: doStreamSpan3,
             currentStep: currentStep + 1,
             promptMessages: promptMessages2,
-            usage: combinedUsage
+            usage: combinedUsage,
+            stepType: nextStepType,
+            previousStepText: fullStepText
           });
           return;
         }
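Each finished step computes `nextStepType` and recurses with `stepType: nextStepType` and `previousStepText: fullStepText`, so the accumulated text survives across "continue" steps and resets otherwise (`fullStepText = stepType === "continue" ? previousStepText : ""` above). A toy reduction of that threading (names are illustrative, not the package's API):

```ts
type StepType = "initial" | "continue" | "tool-result";

// Toy model of the recursion above: each step seeds the next with the text
// accumulated so far; only a "continue" step keeps it.
function runStep(stepType: StepType, stepText: string, previousStepText = ""): string {
  const fullStepText = (stepType === "continue" ? previousStepText : "") + stepText;
  return fullStepText; // passed to the next step as previousStepText
}

let full = runStep("initial", "The quick brown ");
full = runStep("continue", "fox jumps over the lazy dog.", full);
console.log(full); // "The quick brown fox jumps over the lazy dog."
```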
@@ -4104,7 +4163,7 @@ var DefaultStreamTextResult = class {
           telemetry,
           attributes: {
             "ai.response.finishReason": stepFinishReason,
-            "ai.response.text": { output: () =>
+            "ai.response.text": { output: () => fullStepText },
             "ai.response.toolCalls": {
               output: () => stepToolCallsJson
             },
@@ -4112,27 +4171,38 @@ var DefaultStreamTextResult = class {
             "ai.usage.completionTokens": combinedUsage.completionTokens,
             // deprecated
             "ai.finishReason": stepFinishReason,
-            "ai.result.text": { output: () =>
+            "ai.result.text": { output: () => fullStepText },
             "ai.result.toolCalls": {
               output: () => stepToolCallsJson
             }
           }
         })
       );
-      const responseMessages = stepResults.reduce(
-        (
+      const responseMessages = stepResults.reduce((responseMessages2, step) => {
+        if (step.stepType === "continue") {
+          const lastResponseMessage = responseMessages2.pop();
+          if (typeof lastResponseMessage.content === "string") {
+            lastResponseMessage.content += step.text;
+          } else {
+            lastResponseMessage.content.push({
+              text: step.text,
+              type: "text"
+            });
+          }
+          return [...responseMessages2, lastResponseMessage];
+        }
+        return [
          ...responseMessages2,
          ...toResponseMessages({
            text: step.text,
            toolCalls: step.toolCalls,
            toolResults: step.toolResults
          })
-        ]
-
-      );
+        ];
+      }, []);
      resolveUsage(combinedUsage);
      resolveFinishReason(stepFinishReason);
-      resolveText(
+      resolveText(fullStepText);
      resolveToolCalls(stepToolCalls);
      resolveProviderMetadata(stepProviderMetadata);
      resolveToolResults(stepToolResults);
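The rewritten reduce is what keeps `responseMessages` coherent in streaming mode: a "continue" step folds its text into the previous assistant message instead of emitting new messages. A standalone sketch with simplified message and step types (the real code builds fresh messages via `toResponseMessages`; `step.messages` stands in for that here):

```ts
type TextPart = { type: "text"; text: string };
type Message = { role: string; content: string | TextPart[] };
type Step = { stepType: "initial" | "continue" | "tool-result"; text: string; messages: Message[] };

// Simplified version of the reduce above: a "continue" step appends its text
// to the last message instead of pushing new messages.
function collectResponseMessages(steps: Step[]): Message[] {
  return steps.reduce<Message[]>((messages, step) => {
    if (step.stepType === "continue") {
      const last = messages.pop()!;
      if (typeof last.content === "string") {
        last.content += step.text;
      } else {
        last.content.push({ type: "text", text: step.text });
      }
      return [...messages, last];
    }
    return [...messages, ...step.messages]; // stand-in for toResponseMessages(...)
  }, []);
}
```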
@@ -4146,7 +4216,7 @@ var DefaultStreamTextResult = class {
        finishReason: stepFinishReason,
        logprobs: stepLogProbs,
        usage: combinedUsage,
-       text:
+       text: fullStepText,
        toolCalls: stepToolCalls,
        // The tool results are inferred as a never[] type, because they are
        // optional and the execute method with an inferred result type is
@@ -4179,7 +4249,8 @@ var DefaultStreamTextResult = class {
       doStreamSpan,
       currentStep: 0,
       promptMessages,
-      usage: void 0
+      usage: void 0,
+      stepType: "initial"
     });
   }
   /**