ai 3.4.1 → 3.4.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -0
- package/dist/index.d.mts +2 -1
- package/dist/index.d.ts +2 -1
- package/dist/index.js +58 -27
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +58 -27
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
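
The substantive change in 3.4.2 is a new experimental option on generateText: experimental_continuationSteps. When a step finishes with finishReason === "length" (the model hit its output-token limit) and made no tool calls, generateText now issues a continuation step, up to maxSteps, and stitches the partial outputs into a single result text. A minimal usage sketch, assuming the OpenAI provider (the model, prompt, and token cap are illustrative, not part of this diff):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const { text, steps } = await generateText({
  model: openai("gpt-4o"), // any LanguageModel works; provider choice is an assumption
  maxTokens: 256, // a small output cap makes "length" finishes likely
  maxSteps: 5, // upper bound on total steps, including continuations
  experimental_continuationSteps: true, // the new flag in 3.4.2
  prompt: "Write a long story about a lighthouse keeper.",
});

// text is the stitched output of all continuation steps; each entry in
// steps still carries only the chunk generated by that individual step.
console.log(steps.length, text.length);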
package/CHANGELOG.md
CHANGED
package/dist/index.d.mts
CHANGED
@@ -1491,7 +1491,7 @@ If set and supported by the model, calls will generate deterministic results.
 @returns
 A result object that contains the generated text, the results of the tool calls, and additional information.
  */
-declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
+declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, maxSteps, experimental_continuationSteps: continuationSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
     /**
 The language model to use.
      */
@@ -1531,6 +1531,7 @@ A maximum number is required to prevent infinite loops in the case of misconfigured tools.
 By default, it's set to 1, which means that only a single LLM call is made.
  */
     maxSteps?: number;
+    experimental_continuationSteps?: boolean;
     /**
      * Optional telemetry configuration (experimental).
      */
package/dist/index.d.ts
CHANGED
@@ -1491,7 +1491,7 @@ If set and supported by the model, calls will generate deterministic results.
 @returns
 A result object that contains the generated text, the results of the tool calls, and additional information.
  */
-declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
+declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, maxSteps, experimental_continuationSteps: continuationSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
     /**
 The language model to use.
      */
@@ -1531,6 +1531,7 @@ A maximum number is required to prevent infinite loops in the case of misconfigured tools.
 By default, it's set to 1, which means that only a single LLM call is made.
  */
     maxSteps?: number;
+    experimental_continuationSteps?: boolean;
     /**
      * Optional telemetry configuration (experimental).
      */
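
Both declaration files gain the same optional flag next to maxSteps. The relevant slice of the options type is excerpted below; the alias name and the doc comment are mine, paraphrasing the runtime behavior:

type GenerateTextContinuationOptions = {
  /** Maximum number of sequential LLM calls (steps). Defaults to 1. */
  maxSteps?: number;
  /**
   * Experimental: when a step finishes because of the output-token limit
   * (finishReason === "length") and made no tool calls, run a continuation
   * step instead of stopping.
   */
  experimental_continuationSteps?: boolean;
};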
package/dist/index.js
CHANGED
@@ -3052,6 +3052,7 @@ async function generateText({
   maxAutomaticRoundtrips = 0,
   maxToolRoundtrips = maxAutomaticRoundtrips,
   maxSteps = maxToolRoundtrips != null ? maxToolRoundtrips + 1 : 1,
+  experimental_continuationSteps: continuationSteps = false,
   experimental_telemetry: telemetry,
   experimental_providerMetadata: providerMetadata,
   _internal: {
@@ -3095,7 +3096,7 @@ async function generateText({
     }),
     tracer,
     fn: async (span) => {
-      var _a12, _b, _c, _d, _e;
+      var _a12, _b, _c, _d, _e, _f;
       const retry = retryWithExponentialBackoff({ maxRetries });
       const validatedPrompt = validatePrompt({
         system,
@@ -3116,12 +3117,14 @@ async function generateText({
       let currentToolResults = [];
       let stepCount = 0;
       const responseMessages = [];
+      let text = "";
       const steps = [];
       const usage = {
         completionTokens: 0,
         promptTokens: 0,
         totalTokens: 0
       };
+      let stepType = "initial";
       do {
         const currentInputFormat = stepCount === 0 ? validatedPrompt.type : "messages";
         currentModelResponse = await retry(
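
Two pieces of loop state are new here: text, which accumulates output across steps, and stepType, which now drives the do/while loop (it runs until stepType becomes "done" rather than counting roundtrips). From the hunks below, the values it can take are:

type StepType = "initial" | "continuation" | "tool-result" | "done";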
@@ -3153,7 +3156,7 @@ async function generateText({
           }),
           tracer,
           fn: async (span2) => {
-            var _a13, _b2, _c2, _d2, _e2,
+            var _a13, _b2, _c2, _d2, _e2, _f2;
             const result = await model.doGenerate({
               mode,
               ...callSettings,
@@ -3166,7 +3169,7 @@ async function generateText({
             const responseData = {
               id: (_b2 = (_a13 = result.response) == null ? void 0 : _a13.id) != null ? _b2 : generateId3(),
               timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-              modelId: (
+              modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId
             };
             span2.setAttributes(
               selectTelemetryAttributes({
@@ -3220,8 +3223,13 @@ async function generateText({
         usage.completionTokens += currentUsage.completionTokens;
         usage.promptTokens += currentUsage.promptTokens;
         usage.totalTokens += currentUsage.totalTokens;
+        if (stepType === "continuation") {
+          text += " " + ((_b = currentModelResponse.text) != null ? _b : "");
+        } else {
+          text = (_c = currentModelResponse.text) != null ? _c : "";
+        }
         const currentStep = {
-          text: (
+          text: (_d = currentModelResponse.text) != null ? _d : "",
           toolCalls: currentToolCalls,
           toolResults: currentToolResults,
           finishReason: currentModelResponse.finishReason,
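
The accumulation rule added in this hunk: a continuation step is glued onto the accumulated text with a single space, while any other step type resets the accumulator to the latest chunk. The same rule isolated as a small function (the function and parameter names are mine):

function accumulateText(
  previous: string,
  chunk: string | undefined,
  stepType: "initial" | "continuation" | "tool-result" | "done"
): string {
  // Continuation steps append with a space separator; every other step
  // type restarts the accumulator with the current chunk.
  return stepType === "continuation"
    ? previous + " " + (chunk ?? "")
    : chunk ?? "";
}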
@@ -3230,29 +3238,55 @@ async function generateText({
           logprobs: currentModelResponse.logprobs,
           response: {
             ...currentModelResponse.response,
-            headers: (
+            headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers
           },
           experimental_providerMetadata: currentModelResponse.providerMetadata
         };
         steps.push(currentStep);
         await (onStepFinish == null ? void 0 : onStepFinish(currentStep));
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if (stepType === "continuation") {
+          const lastResponseMessage = responseMessages.pop();
+          promptMessages.pop();
+          if (typeof lastResponseMessage.content === "string") {
+            lastResponseMessage.content = text;
+          } else {
+            lastResponseMessage.content.push({
+              text: " " + currentModelResponse.text,
+              type: "text"
+            });
+          }
+          responseMessages.push(lastResponseMessage);
+          promptMessages.push(
+            convertToLanguageModelMessage(lastResponseMessage, null)
+          );
+        } else {
+          const newResponseMessages = toResponseMessages({
+            text: currentModelResponse.text,
+            toolCalls: currentToolCalls,
+            toolResults: currentToolResults
+          });
+          responseMessages.push(...newResponseMessages);
+          promptMessages.push(
+            ...newResponseMessages.map(
+              (message) => convertToLanguageModelMessage(message, null)
+            )
+          );
+        }
+        if (++stepCount >= maxSteps) {
+          stepType = "done";
+        } else if (continuationSteps === true && currentStep.finishReason === "length" && // only use continuation when there are no tool calls:
+        currentToolCalls.length === 0) {
+          stepType = "continuation";
+        } else if (
+          // there are tool calls:
+          currentToolCalls.length > 0 && // all current tool calls have results:
+          currentToolResults.length === currentToolCalls.length
+        ) {
+          stepType = "tool-result";
+        } else {
+          stepType = "done";
+        }
+      } while (stepType !== "done");
       span.setAttributes(
         selectTelemetryAttributes({
           telemetry,
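
The tail of this hunk is the new step-type decision: the step budget wins first, then continuation (only on a "length" finish with no tool calls), then tool-result (only when every tool call produced a result). The same decision written out as a standalone function (the argument names are mine, inferred from the compiled output):

function nextStepType(args: {
  stepCount: number; // already incremented for the step that just ran
  maxSteps: number;
  continuationSteps: boolean;
  finishReason: string;
  toolCallCount: number;
  toolResultCount: number;
}): "continuation" | "tool-result" | "done" {
  if (args.stepCount >= args.maxSteps) return "done";
  if (
    args.continuationSteps &&
    args.finishReason === "length" &&
    args.toolCallCount === 0 // continuation never mixes with tool calls
  ) {
    return "continuation";
  }
  if (
    args.toolCallCount > 0 &&
    args.toolResultCount === args.toolCallCount // all calls have results
  ) {
    return "tool-result";
  }
  return "done";
}

Also worth noting in the continuation branch above: the previous assistant message is popped from both responseMessages and promptMessages and re-pushed with the stitched content, so the next call sees one combined assistant turn rather than a trail of fragments.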
@@ -3278,10 +3312,7 @@ async function generateText({
         })
       );
       return new DefaultGenerateTextResult({
-
-        // If they need to check if the model did not return any text,
-        // they can check the length of the string:
-        text: (_d = currentModelResponse.text) != null ? _d : "",
+        text,
         toolCalls: currentToolCalls,
         toolResults: currentToolResults,
         finishReason: currentModelResponse.finishReason,
@@ -3289,7 +3320,7 @@ async function generateText({
         warnings: currentModelResponse.warnings,
         response: {
           ...currentModelResponse.response,
-          headers: (
+          headers: (_f = currentModelResponse.rawResponse) == null ? void 0 : _f.headers
         },
         logprobs: currentModelResponse.logprobs,
         responseMessages,
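
With the accumulated text now passed to DefaultGenerateTextResult, result.text reflects every continuation chunk, while finishReason still comes from the last step. That means a final finishReason of "length" signals that maxSteps ran out before the model finished. A sketch of checking for that (provider and prompt are illustrative):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai("gpt-4o"), // illustrative provider choice
  maxTokens: 256,
  maxSteps: 3, // continuations stop once this budget is exhausted
  experimental_continuationSteps: true,
  prompt: "Summarize the history of lighthouses in detail.",
});

if (result.finishReason === "length") {
  // Still truncated: the step budget was used up mid-continuation.
  console.warn("output truncated after", result.steps.length, "steps");
}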