ai 3.4.2 → 3.4.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +14 -0
- package/dist/index.d.mts +25 -4
- package/dist/index.d.ts +25 -4
- package/dist/index.js +153 -56
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +153 -56
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,19 @@
 # ai
 
+## 3.4.4
+
+### Patch Changes
+
+- 4db074b: fix (ai/core): correct whitespace in generateText continueSteps
+- 1297e1b: fix (ai/core): correct whitespace in streamText continueSteps
+
+## 3.4.3
+
+### Patch Changes
+
+- b270ae3: feat (ai/core): streamText continueSteps (experimental)
+- b270ae3: chore (ai/core): rename generateText continuationSteps to continueSteps
+
 ## 3.4.2
 
 ### Patch Changes
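For orientation, the renamed option is used like this. A minimal sketch, assuming ai >= 3.4.3 and an arbitrary provider; the @ai-sdk/openai model below is illustrative, not part of this diff:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai'; // any provider works; this one is an assumption

const { text, steps } = await generateText({
  model: openai('gpt-4o-mini'), // hypothetical model choice
  prompt: 'Write a long essay about package registries.',
  // Upper bound on LLM calls across initial, continue, and tool-result steps:
  maxSteps: 5,
  // Renamed from experimental_continuationSteps in 3.4.3. When the model stops
  // with finish reason "length", further steps continue the text:
  experimental_continueSteps: true,
});

console.log(steps.length); // number of steps actually taken
console.log(text);         // text combined across all continue steps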
package/dist/index.d.mts
CHANGED
@@ -1362,6 +1362,12 @@ type StepResult<TOOLS extends Record<string, CoreTool>> = {
 results that can be fully encapsulated in the provider.
  */
 readonly experimental_providerMetadata: ProviderMetadata | undefined;
+/**
+The type of step that this result is for. The first step is always
+an "initial" step, and subsequent steps are either "continue" steps
+or "tool-result" steps.
+ */
+readonly stepType: 'initial' | 'continue' | 'tool-result';
 };
 
 /**
@@ -1491,7 +1497,7 @@ If set and supported by the model, calls will generate deterministic results.
 @returns
 A result object that contains the generated text, the results of the tool calls, and additional information.
  */
-declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, maxSteps, experimental_continuationSteps:
+declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, maxSteps, experimental_continuationSteps, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
 /**
 The language model to use.
  */
@@ -1531,9 +1537,18 @@ A maximum number is required to prevent infinite loops in the case of misconfigu
 By default, it's set to 1, which means that only a single LLM call is made.
  */
 maxSteps?: number;
+/**
+@deprecated Use `experimental_continueSteps` instead.
+ */
 experimental_continuationSteps?: boolean;
 /**
-Optional telemetry configuration (experimental).
+When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
+
+By default, it's set to false.
+ */
+experimental_continueSteps?: boolean;
+/**
+Optional telemetry configuration (experimental).
  */
 experimental_telemetry?: TelemetrySettings;
 /**
@@ -1832,7 +1847,7 @@ If set and supported by the model, calls will generate deterministic results.
 @return
 A result object for accessing different stream types and additional information.
  */
-declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
 /**
 The language model to use.
  */
@@ -1869,6 +1884,12 @@ By default, it's set to 1, which means that only a single LLM call is made.
  */
 maxSteps?: number;
 /**
+When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
+
+By default, it's set to false.
+ */
+experimental_continueSteps?: boolean;
+/**
 Optional telemetry configuration (experimental).
  */
 experimental_telemetry?: TelemetrySettings;
@@ -1896,7 +1917,7 @@ Callback that is called when the LLM response and all request tool executions
 
 The usage is the combined usage of all steps.
  */
-onFinish?: (event: StepResult<TOOLS> & {
+onFinish?: (event: Omit<StepResult<TOOLS>, 'stepType'> & {
 /**
 Details for all steps.
  */
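The new stepType field can be observed per step via onStepFinish, which both signatures above accept. A sketch under the same assumptions as the earlier example:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai'; // illustrative provider

await generateText({
  model: openai('gpt-4o-mini'), // hypothetical model choice
  prompt: 'Write a long essay about package registries.',
  maxSteps: 3,
  experimental_continueSteps: true,
  onStepFinish(step) {
    // The first step is always "initial"; later steps are "continue"
    // (length-based continuation) or "tool-result" (after tool execution).
    console.log(step.stepType, step.finishReason);
  },
});

Note that onFinish now receives Omit<StepResult<TOOLS>, 'stepType'>: the finish event aggregates all steps, so a single stepType would be meaningless there.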
package/dist/index.d.ts
CHANGED
@@ -1362,6 +1362,12 @@ type StepResult<TOOLS extends Record<string, CoreTool>> = {
 results that can be fully encapsulated in the provider.
  */
 readonly experimental_providerMetadata: ProviderMetadata | undefined;
+/**
+The type of step that this result is for. The first step is always
+an "initial" step, and subsequent steps are either "continue" steps
+or "tool-result" steps.
+ */
+readonly stepType: 'initial' | 'continue' | 'tool-result';
 };
 
 /**
@@ -1491,7 +1497,7 @@ If set and supported by the model, calls will generate deterministic results.
 @returns
 A result object that contains the generated text, the results of the tool calls, and additional information.
  */
-declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, maxSteps, experimental_continuationSteps:
+declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, maxSteps, experimental_continuationSteps, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
 /**
 The language model to use.
  */
@@ -1531,9 +1537,18 @@ A maximum number is required to prevent infinite loops in the case of misconfigu
 By default, it's set to 1, which means that only a single LLM call is made.
  */
 maxSteps?: number;
+/**
+@deprecated Use `experimental_continueSteps` instead.
+ */
 experimental_continuationSteps?: boolean;
 /**
-Optional telemetry configuration (experimental).
+When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
+
+By default, it's set to false.
+ */
+experimental_continueSteps?: boolean;
+/**
+Optional telemetry configuration (experimental).
  */
 experimental_telemetry?: TelemetrySettings;
 /**
@@ -1832,7 +1847,7 @@ If set and supported by the model, calls will generate deterministic results.
 @return
 A result object for accessing different stream types and additional information.
  */
-declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
 /**
 The language model to use.
  */
@@ -1869,6 +1884,12 @@ By default, it's set to 1, which means that only a single LLM call is made.
  */
 maxSteps?: number;
 /**
+When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
+
+By default, it's set to false.
+ */
+experimental_continueSteps?: boolean;
+/**
 Optional telemetry configuration (experimental).
  */
 experimental_telemetry?: TelemetrySettings;
@@ -1896,7 +1917,7 @@ Callback that is called when the LLM response and all request tool executions
 
 The usage is the combined usage of all steps.
  */
-onFinish?: (event: StepResult<TOOLS> & {
+onFinish?: (event: Omit<StepResult<TOOLS>, 'stepType'> & {
 /**
 Details for all steps.
  */
package/dist/index.js
CHANGED
@@ -2977,6 +2977,19 @@ function prepareToolsAndToolChoice({
 };
 }
 
+// core/util/split-on-last-whitespace.ts
+var lastWhitespaceRegexp = /^([\s\S]*?)(\s+)(\S*)$/;
+function splitOnLastWhitespace(text) {
+const match = text.match(lastWhitespaceRegexp);
+return match ? { prefix: match[1], whitespace: match[2], suffix: match[3] } : void 0;
+}
+
+// core/util/remove-text-after-last-whitespace.ts
+function removeTextAfterLastWhitespace(text) {
+const match = splitOnLastWhitespace(text);
+return match ? match.prefix + match.whitespace : text;
+}
+
 // core/generate-text/parse-tool-call.ts
 var import_provider_utils8 = require("@ai-sdk/provider-utils");
 var import_ui_utils4 = require("@ai-sdk/ui-utils");
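For reference, the two helpers added above split a string at its last run of whitespace; this is what lets continueSteps trim a trailing partial word before asking the model to continue. A TypeScript restatement of the same logic, with outputs worked out by hand:

const lastWhitespaceRegexp = /^([\s\S]*?)(\s+)(\S*)$/;

// Splits into: everything before the last whitespace run, the whitespace
// itself, and the trailing (possibly partial) word after it.
function splitOnLastWhitespace(text: string) {
  const match = text.match(lastWhitespaceRegexp);
  return match
    ? { prefix: match[1], whitespace: match[2], suffix: match[3] }
    : undefined;
}

// Drops the trailing partial word but keeps the whitespace before it.
function removeTextAfterLastWhitespace(text: string): string {
  const match = splitOnLastWhitespace(text);
  return match ? match.prefix + match.whitespace : text;
}

splitOnLastWhitespace('The quick bro');
// => { prefix: 'The quick', whitespace: ' ', suffix: 'bro' }

removeTextAfterLastWhitespace('The quick bro');
// => 'The quick '

splitOnLastWhitespace('no-whitespace');
// => undefined (the regexp requires at least one \s)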
@@ -3052,7 +3065,8 @@ async function generateText({
 maxAutomaticRoundtrips = 0,
 maxToolRoundtrips = maxAutomaticRoundtrips,
 maxSteps = maxToolRoundtrips != null ? maxToolRoundtrips + 1 : 1,
-experimental_continuationSteps
+experimental_continuationSteps,
+experimental_continueSteps: continueSteps = experimental_continuationSteps != null ? experimental_continuationSteps : false,
 experimental_telemetry: telemetry,
 experimental_providerMetadata: providerMetadata,
 _internal: {
@@ -3096,7 +3110,7 @@
 }),
 tracer,
 fn: async (span) => {
-var _a12, _b, _c, _d, _e
+var _a12, _b, _c, _d, _e;
 const retry = retryWithExponentialBackoff({ maxRetries });
 const validatedPrompt = validatePrompt({
 system,
@@ -3156,7 +3170,7 @@
 }),
 tracer,
 fn: async (span2) => {
-var _a13, _b2, _c2, _d2, _e2,
+var _a13, _b2, _c2, _d2, _e2, _f;
 const result = await model.doGenerate({
 mode,
 ...callSettings,
@@ -3169,7 +3183,7 @@
 const responseData = {
 id: (_b2 = (_a13 = result.response) == null ? void 0 : _a13.id) != null ? _b2 : generateId3(),
 timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-modelId: (
+modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : model.modelId
 };
 span2.setAttributes(
 selectTelemetryAttributes({
@@ -3223,13 +3237,24 @@
 usage.completionTokens += currentUsage.completionTokens;
 usage.promptTokens += currentUsage.promptTokens;
 usage.totalTokens += currentUsage.totalTokens;
-
-
-
-
+let nextStepType = "done";
+if (++stepCount < maxSteps) {
+if (continueSteps && currentModelResponse.finishReason === "length" && // only use continue when there are no tool calls:
+currentToolCalls.length === 0) {
+nextStepType = "continue";
+} else if (
+// there are tool calls:
+currentToolCalls.length > 0 && // all current tool calls have results:
+currentToolResults.length === currentToolCalls.length
+) {
+nextStepType = "tool-result";
+}
 }
+const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace((_b = currentModelResponse.text) != null ? _b : "") : (_c = currentModelResponse.text) != null ? _c : "";
+text = nextStepType === "continue" || stepType === "continue" ? text + stepText : stepText;
 const currentStep = {
-
+stepType,
+text: stepText,
 toolCalls: currentToolCalls,
 toolResults: currentToolResults,
 finishReason: currentModelResponse.finishReason,
@@ -3238,20 +3263,20 @@
 logprobs: currentModelResponse.logprobs,
 response: {
 ...currentModelResponse.response,
-headers: (
+headers: (_d = currentModelResponse.rawResponse) == null ? void 0 : _d.headers
 },
 experimental_providerMetadata: currentModelResponse.providerMetadata
 };
 steps.push(currentStep);
 await (onStepFinish == null ? void 0 : onStepFinish(currentStep));
-if (stepType === "continuation") {
+if (stepType === "continue") {
 const lastResponseMessage = responseMessages.pop();
 promptMessages.pop();
 if (typeof lastResponseMessage.content === "string") {
 lastResponseMessage.content = text;
 } else {
 lastResponseMessage.content.push({
-text:
+text: stepText,
 type: "text"
 });
 }
@@ -3259,6 +3284,18 @@
 promptMessages.push(
 convertToLanguageModelMessage(lastResponseMessage, null)
 );
+} else if (nextStepType === "continue") {
+const newResponseMessages = toResponseMessages({
+text,
+toolCalls: currentToolCalls,
+toolResults: currentToolResults
+});
+responseMessages.push(...newResponseMessages);
+promptMessages.push(
+...newResponseMessages.map(
+(message) => convertToLanguageModelMessage(message, null)
+)
+);
 } else {
 const newResponseMessages = toResponseMessages({
 text: currentModelResponse.text,
@@ -3272,20 +3309,7 @@
 )
 );
 }
-
-stepType = "done";
-} else if (continuationSteps === true && currentStep.finishReason === "length" && // only use continuation when there are no tool calls:
-currentToolCalls.length === 0) {
-stepType = "continuation";
-} else if (
-// there are tool calls:
-currentToolCalls.length > 0 && // all current tool calls have results:
-currentToolResults.length === currentToolCalls.length
-) {
-stepType = "tool-result";
-} else {
-stepType = "done";
-}
+stepType = nextStepType;
 } while (stepType !== "done");
 span.setAttributes(
 selectTelemetryAttributes({
@@ -3320,7 +3344,7 @@
 warnings: currentModelResponse.warnings,
 response: {
 ...currentModelResponse.response,
-headers: (
+headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers
 },
 logprobs: currentModelResponse.logprobs,
 responseMessages,
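As the destructuring at the top of this section shows, the deprecated experimental_continuationSteps is still honored as the default for continueSteps, so the rename is non-breaking. A migration sketch (model and prompt are placeholders):

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai'; // illustrative provider

const model = openai('gpt-4o-mini'); // hypothetical model choice
const prompt = 'Write a long essay about package registries.';

// Before (deprecated since 3.4.3, still works via the fallback):
await generateText({ model, prompt, maxSteps: 5, experimental_continuationSteps: true });

// After:
await generateText({ model, prompt, maxSteps: 5, experimental_continueSteps: true });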
@@ -3766,6 +3790,7 @@ async function streamText({
 headers,
 maxToolRoundtrips = 0,
 maxSteps = maxToolRoundtrips != null ? maxToolRoundtrips + 1 : 1,
+experimental_continueSteps: continueSteps = false,
 experimental_telemetry: telemetry,
 experimental_providerMetadata: providerMetadata,
 experimental_toolCallStreaming: toolCallStreaming = false,
@@ -3910,6 +3935,7 @@
 telemetry,
 startTimestampMs,
 maxSteps,
+continueSteps,
 startStep,
 promptMessages,
 modelId: model.modelId,
@@ -3933,6 +3959,7 @@ var DefaultStreamTextResult = class {
 telemetry,
 startTimestampMs,
 maxSteps,
+continueSteps,
 startStep,
 promptMessages,
 modelId,
@@ -3984,7 +4011,9 @@ var DefaultStreamTextResult = class {
 promptTokens: 0,
 completionTokens: 0,
 totalTokens: 0
-}
+},
+stepType,
+previousStepText = ""
 }) {
 const stepToolCalls = [];
 const stepToolResults = [];
@@ -3997,12 +4026,25 @@ var DefaultStreamTextResult = class {
 let stepProviderMetadata;
 let stepFirstChunk = true;
 let stepText = "";
+let fullStepText = stepType === "continue" ? previousStepText : "";
 let stepLogProbs;
 let stepResponse = {
 id: generateId3(),
 timestamp: currentDate(),
 modelId
 };
+let chunkBuffer = "";
+let chunkTextPublished = false;
+async function publishTextChunk({
+controller,
+chunk
+}) {
+controller.enqueue(chunk);
+stepText += chunk.textDelta;
+fullStepText += chunk.textDelta;
+chunkTextPublished = true;
+await (onChunk == null ? void 0 : onChunk({ chunk }));
+}
 addStream(
 stream2.pipeThrough(
 new TransformStream({
@@ -4028,9 +4070,22 @@ var DefaultStreamTextResult = class {
 const chunkType = chunk.type;
 switch (chunkType) {
 case "text-delta": {
-
-
-
+if (continueSteps) {
+chunkBuffer += chunk.textDelta;
+const split = splitOnLastWhitespace(chunkBuffer);
+if (split != null) {
+chunkBuffer = split.suffix;
+await publishTextChunk({
+controller,
+chunk: {
+type: "text-delta",
+textDelta: split.prefix + split.whitespace
+}
+});
+}
+} else {
+await publishTextChunk({ controller, chunk });
+}
 break;
 }
 case "tool-call": {
@@ -4086,6 +4141,30 @@ var DefaultStreamTextResult = class {
 // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
 async flush(controller) {
 const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
+let nextStepType = "done";
+if (currentStep + 1 < maxSteps) {
+if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
+stepToolCalls.length === 0) {
+nextStepType = "continue";
+} else if (
+// there are tool calls:
+stepToolCalls.length > 0 && // all current tool calls have results:
+stepToolResults.length === stepToolCalls.length
+) {
+nextStepType = "tool-result";
+}
+}
+if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
+stepType === "continue" && !chunkTextPublished)) {
+await publishTextChunk({
+controller,
+chunk: {
+type: "text-delta",
+textDelta: chunkBuffer
+}
+});
+chunkBuffer = "";
+}
 try {
 doStreamSpan2.setAttributes(
 selectTelemetryAttributes({
@@ -4129,6 +4208,7 @@ var DefaultStreamTextResult = class {
 response: stepResponse
 });
 const stepResult = {
+stepType,
 text: stepText,
 toolCalls: stepToolCalls,
 toolResults: stepToolResults,
@@ -4147,21 +4227,24 @@ var DefaultStreamTextResult = class {
 completionTokens: usage.completionTokens + stepUsage.completionTokens,
 totalTokens: usage.totalTokens + stepUsage.totalTokens
 };
-if (
-
-
-
-currentStep + 1 < maxSteps
-) {
-promptMessages2.push(
-...toResponseMessages({
+if (nextStepType !== "done") {
+if (stepType === "continue") {
+const lastPromptMessage = promptMessages2[promptMessages2.length - 1];
+lastPromptMessage.content.push({
 text: stepText,
-toolCalls: stepToolCalls,
-toolResults: stepToolResults
-}).map(
-(message) => convertToLanguageModelMessage(message, null)
-)
-);
+type: "text"
+});
+} else {
+promptMessages2.push(
+...toResponseMessages({
+text: stepText,
+toolCalls: stepToolCalls,
+toolResults: stepToolResults
+}).map(
+(message) => convertToLanguageModelMessage(message, null)
+)
+);
+}
 const {
 result,
 doStreamSpan: doStreamSpan3,
@@ -4178,7 +4261,9 @@ var DefaultStreamTextResult = class {
 doStreamSpan: doStreamSpan3,
 currentStep: currentStep + 1,
 promptMessages: promptMessages2,
-usage: combinedUsage
+usage: combinedUsage,
+stepType: nextStepType,
+previousStepText: fullStepText
 });
 return;
 }
@@ -4197,7 +4282,7 @@ var DefaultStreamTextResult = class {
 telemetry,
 attributes: {
 "ai.response.finishReason": stepFinishReason,
-"ai.response.text": { output: () =>
+"ai.response.text": { output: () => fullStepText },
 "ai.response.toolCalls": {
 output: () => stepToolCallsJson
 },
@@ -4205,27 +4290,38 @@ var DefaultStreamTextResult = class {
 "ai.usage.completionTokens": combinedUsage.completionTokens,
 // deprecated
 "ai.finishReason": stepFinishReason,
-"ai.result.text": { output: () =>
+"ai.result.text": { output: () => fullStepText },
 "ai.result.toolCalls": {
 output: () => stepToolCallsJson
 }
 }
 })
 );
-const responseMessages = stepResults.reduce(
-(
+const responseMessages = stepResults.reduce((responseMessages2, step) => {
+if (step.stepType === "continue") {
+const lastResponseMessage = responseMessages2.pop();
+if (typeof lastResponseMessage.content === "string") {
+lastResponseMessage.content += step.text;
+} else {
+lastResponseMessage.content.push({
+text: step.text,
+type: "text"
+});
+}
+return [...responseMessages2, lastResponseMessage];
+}
+return [
 ...responseMessages2,
 ...toResponseMessages({
 text: step.text,
 toolCalls: step.toolCalls,
 toolResults: step.toolResults
 })
-]
-
-);
+];
+}, []);
 resolveUsage(combinedUsage);
 resolveFinishReason(stepFinishReason);
-resolveText(
+resolveText(fullStepText);
 resolveToolCalls(stepToolCalls);
 resolveProviderMetadata(stepProviderMetadata);
 resolveToolResults(stepToolResults);
@@ -4239,7 +4335,7 @@ var DefaultStreamTextResult = class {
 finishReason: stepFinishReason,
 logprobs: stepLogProbs,
 usage: combinedUsage,
-text:
+text: fullStepText,
 toolCalls: stepToolCalls,
 // The tool results are inferred as a never[] type, because they are
 // optional and the execute method with an inferred result type is
@@ -4272,7 +4368,8 @@ var DefaultStreamTextResult = class {
 doStreamSpan,
 currentStep: 0,
 promptMessages,
-usage: void 0
+usage: void 0,
+stepType: "initial"
 });
 }
 /**
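Taken together, streamText gains the same opt-in flag in 3.4.3 plus the whitespace handling fixed in 3.4.4: with continueSteps enabled, text deltas are published only up to the last whitespace, so a trailing partial word can be held back (and dropped) when the next step is a length continuation. A usage sketch under the same assumptions as the earlier examples; the steps property on the finish event follows the "Details for all steps." typing above, though its exact name is not visible in this diff:

import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // illustrative provider

const result = await streamText({
  model: openai('gpt-4o-mini'), // hypothetical model choice
  prompt: 'Write a long essay about package registries.',
  maxSteps: 5,
  experimental_continueSteps: true,
  onFinish({ text, steps }) {
    // stepType is omitted from this event (see the Omit<...> change above);
    // per-step types are still available on each entry of steps.
    console.log(steps.map((step) => step.stepType)); // e.g. ['initial', 'continue']
    console.log(text.length); // combined text across all steps
  },
});

// Deltas arrive word-aligned when continueSteps is on, because partial words
// are buffered until more whitespace (or the end of the step) arrives.
for await (const delta of result.textStream) {
  process.stdout.write(delta);
}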
|