ai 3.4.1 → 3.4.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,18 @@
  # ai

+ ## 3.4.3
+
+ ### Patch Changes
+
+ - b270ae3: feat (ai/core): streamText continueSteps (experimental)
+ - b270ae3: chore (ai/core): rename generateText continuationSteps to continueSteps
+
+ ## 3.4.2
+
+ ### Patch Changes
+
+ - e6c7e98: feat (ai/core): add continuationSteps to generateText
+
  ## 3.4.1

  ### Patch Changes
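
For context, the `experimental_continueSteps` flag introduced in these releases lets `generateText` keep generating when a step ends with finish reason "length". A minimal usage sketch — the model import and prompt are illustrative assumptions, while `generateText`, `maxSteps`, `experimental_continueSteps`, and `result.text` come from this diff:

```ts
// Sketch: continuing "length"-terminated generations across steps.
// openai('gpt-4o') and the prompt are placeholders, not part of this diff.
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai('gpt-4o'),
  maxSteps: 5, // upper bound on total steps, including continuation steps
  experimental_continueSteps: true, // renamed from continuationSteps in 3.4.3
  prompt: 'Write a long-form essay about package registries.',
});

// With continueSteps enabled, result.text is the concatenated text of the
// initial step plus any "continue" steps (see the generateText changes below).
console.log(result.text);
```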
package/dist/index.d.mts CHANGED
@@ -1362,6 +1362,12 @@ type StepResult<TOOLS extends Record<string, CoreTool>> = {
  results that can be fully encapsulated in the provider.
  */
  readonly experimental_providerMetadata: ProviderMetadata | undefined;
+ /**
+ The type of step that this result is for. The first step is always
+ an "initial" step, and subsequent steps are either "continue" steps
+ or "tool-result" steps.
+ */
+ readonly stepType: 'initial' | 'continue' | 'tool-result';
  };

  /**
@@ -1491,7 +1497,7 @@ If set and supported by the model, calls will generate deterministic results.
  @returns
  A result object that contains the generated text, the results of the tool calls, and additional information.
  */
- declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
+ declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, maxSteps, experimental_continuationSteps, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1532,7 +1538,17 @@ By default, it's set to 1, which means that only a single LLM call is made.
  */
  maxSteps?: number;
  /**
- * Optional telemetry configuration (experimental).
+ @deprecated Use `experimental_continueSteps` instead.
+ */
+ experimental_continuationSteps?: boolean;
+ /**
+ When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
+
+ By default, it's set to false.
+ */
+ experimental_continueSteps?: boolean;
+ /**
+ Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
  /**
@@ -1831,7 +1847,7 @@ If set and supported by the model, calls will generate deterministic results.
  @return
  A result object for accessing different stream types and additional information.
  */
- declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+ declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1868,6 +1884,12 @@ By default, it's set to 1, which means that only a single LLM call is made.
  */
  maxSteps?: number;
  /**
+ When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
+
+ By default, it's set to false.
+ */
+ experimental_continueSteps?: boolean;
+ /**
  Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
@@ -1895,7 +1917,7 @@ Callback that is called when the LLM response and all request tool executions

  The usage is the combined usage of all steps.
  */
- onFinish?: (event: StepResult<TOOLS> & {
+ onFinish?: (event: Omit<StepResult<TOOLS>, 'stepType'> & {
  /**
  Details for all steps.
  */
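
The new `stepType` field on `StepResult` can be observed per step via `onStepFinish`; note that `onFinish` now receives `Omit<StepResult<TOOLS>, 'stepType'>`, since the final event aggregates all steps rather than describing a single one. A hedged sketch of the streaming variant — the model, prompt, and use of `result.textStream` are assumptions, while the option names, `stepType` values, and callback shapes are taken from the declarations above:

```ts
// Sketch: observing step types while streaming with continueSteps enabled.
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await streamText({
  model: openai('gpt-4o'),
  maxSteps: 3,
  experimental_continueSteps: true,
  prompt: 'Draft a detailed migration guide.',
  onStepFinish(step) {
    // 'initial' for the first step, then 'continue' or 'tool-result'.
    console.log(step.stepType, step.finishReason);
  },
  onFinish(event) {
    // The final event omits stepType; per-step details live in event.steps.
    console.log(event.steps.length, 'steps');
  },
});

for await (const delta of result.textStream) {
  process.stdout.write(delta);
}
```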
package/dist/index.d.ts CHANGED
@@ -1362,6 +1362,12 @@ type StepResult<TOOLS extends Record<string, CoreTool>> = {
  results that can be fully encapsulated in the provider.
  */
  readonly experimental_providerMetadata: ProviderMetadata | undefined;
+ /**
+ The type of step that this result is for. The first step is always
+ an "initial" step, and subsequent steps are either "continue" steps
+ or "tool-result" steps.
+ */
+ readonly stepType: 'initial' | 'continue' | 'tool-result';
  };

  /**
@@ -1491,7 +1497,7 @@ If set and supported by the model, calls will generate deterministic results.
  @returns
  A result object that contains the generated text, the results of the tool calls, and additional information.
  */
- declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
+ declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, maxSteps, experimental_continuationSteps, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1532,7 +1538,17 @@ By default, it's set to 1, which means that only a single LLM call is made.
  */
  maxSteps?: number;
  /**
- * Optional telemetry configuration (experimental).
+ @deprecated Use `experimental_continueSteps` instead.
+ */
+ experimental_continuationSteps?: boolean;
+ /**
+ When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
+
+ By default, it's set to false.
+ */
+ experimental_continueSteps?: boolean;
+ /**
+ Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
  /**
@@ -1831,7 +1847,7 @@ If set and supported by the model, calls will generate deterministic results.
  @return
  A result object for accessing different stream types and additional information.
  */
- declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+ declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1868,6 +1884,12 @@ By default, it's set to 1, which means that only a single LLM call is made.
  */
  maxSteps?: number;
  /**
+ When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
+
+ By default, it's set to false.
+ */
+ experimental_continueSteps?: boolean;
+ /**
  Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
@@ -1895,7 +1917,7 @@ Callback that is called when the LLM response and all request tool executions

  The usage is the combined usage of all steps.
  */
- onFinish?: (event: StepResult<TOOLS> & {
+ onFinish?: (event: Omit<StepResult<TOOLS>, 'stepType'> & {
  /**
  Details for all steps.
  */
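
As the `@deprecated` tag above indicates, 3.4.2's `experimental_continuationSteps` on `generateText` is superseded by `experimental_continueSteps` in 3.4.3 (the implementation below falls back to the old flag when the new one is unset). A migration sketch; the model and prompt are placeholders:

```ts
// Migration from the short-lived 3.4.2 option name to the 3.4.3 name.
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const model = openai('gpt-4o'); // placeholder model
const prompt = 'Summarize the 3.4.x changes.'; // placeholder prompt

// 3.4.2 (still accepted, but deprecated in 3.4.3):
// await generateText({ model, prompt, experimental_continuationSteps: true });

// 3.4.3 onward:
await generateText({ model, prompt, experimental_continueSteps: true });
```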
package/dist/index.js CHANGED
@@ -3052,6 +3052,8 @@ async function generateText({
  maxAutomaticRoundtrips = 0,
  maxToolRoundtrips = maxAutomaticRoundtrips,
  maxSteps = maxToolRoundtrips != null ? maxToolRoundtrips + 1 : 1,
+ experimental_continuationSteps,
+ experimental_continueSteps: continueSteps = experimental_continuationSteps != null ? experimental_continuationSteps : false,
  experimental_telemetry: telemetry,
  experimental_providerMetadata: providerMetadata,
  _internal: {
@@ -3095,7 +3097,7 @@
  }),
  tracer,
  fn: async (span) => {
- var _a12, _b, _c, _d, _e;
+ var _a12, _b, _c, _d, _e, _f, _g;
  const retry = retryWithExponentialBackoff({ maxRetries });
  const validatedPrompt = validatePrompt({
  system,
@@ -3116,12 +3118,14 @@
  let currentToolResults = [];
  let stepCount = 0;
  const responseMessages = [];
+ let text = "";
  const steps = [];
  const usage = {
  completionTokens: 0,
  promptTokens: 0,
  totalTokens: 0
  };
+ let stepType = "initial";
  do {
  const currentInputFormat = stepCount === 0 ? validatedPrompt.type : "messages";
  currentModelResponse = await retry(
@@ -3153,7 +3157,7 @@
  }),
  tracer,
  fn: async (span2) => {
- var _a13, _b2, _c2, _d2, _e2, _f;
+ var _a13, _b2, _c2, _d2, _e2, _f2;
  const result = await model.doGenerate({
  mode,
  ...callSettings,
@@ -3166,7 +3170,7 @@
  const responseData = {
  id: (_b2 = (_a13 = result.response) == null ? void 0 : _a13.id) != null ? _b2 : generateId3(),
  timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
- modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : model.modelId
+ modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId
  };
  span2.setAttributes(
  selectTelemetryAttributes({
@@ -3220,8 +3224,14 @@
  usage.completionTokens += currentUsage.completionTokens;
  usage.promptTokens += currentUsage.promptTokens;
  usage.totalTokens += currentUsage.totalTokens;
+ if (stepType === "continue") {
+ text += (_b = currentModelResponse.text) != null ? _b : "";
+ } else {
+ text = (_c = currentModelResponse.text) != null ? _c : "";
+ }
  const currentStep = {
- text: (_b = currentModelResponse.text) != null ? _b : "",
+ stepType,
+ text: (_d = currentModelResponse.text) != null ? _d : "",
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
  finishReason: currentModelResponse.finishReason,
@@ -3230,29 +3240,55 @@
  logprobs: currentModelResponse.logprobs,
  response: {
  ...currentModelResponse.response,
- headers: (_c = currentModelResponse.rawResponse) == null ? void 0 : _c.headers
+ headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers
  },
  experimental_providerMetadata: currentModelResponse.providerMetadata
  };
  steps.push(currentStep);
  await (onStepFinish == null ? void 0 : onStepFinish(currentStep));
- const newResponseMessages = toResponseMessages({
- text: currentModelResponse.text,
- toolCalls: currentToolCalls,
- toolResults: currentToolResults
- });
- responseMessages.push(...newResponseMessages);
- promptMessages.push(
- ...newResponseMessages.map(
- (message) => convertToLanguageModelMessage(message, null)
- )
- );
- } while (
- // there are tool calls:
- currentToolCalls.length > 0 && // all current tool calls have results:
- currentToolResults.length === currentToolCalls.length && // the number of steps is less than the maximum:
- ++stepCount < maxSteps
- );
+ if (stepType === "continue") {
+ const lastResponseMessage = responseMessages.pop();
+ promptMessages.pop();
+ if (typeof lastResponseMessage.content === "string") {
+ lastResponseMessage.content = text;
+ } else {
+ lastResponseMessage.content.push({
+ text: (_f = currentModelResponse.text) != null ? _f : "",
+ type: "text"
+ });
+ }
+ responseMessages.push(lastResponseMessage);
+ promptMessages.push(
+ convertToLanguageModelMessage(lastResponseMessage, null)
+ );
+ } else {
+ const newResponseMessages = toResponseMessages({
+ text: currentModelResponse.text,
+ toolCalls: currentToolCalls,
+ toolResults: currentToolResults
+ });
+ responseMessages.push(...newResponseMessages);
+ promptMessages.push(
+ ...newResponseMessages.map(
+ (message) => convertToLanguageModelMessage(message, null)
+ )
+ );
+ }
+ if (++stepCount >= maxSteps) {
+ stepType = "done";
+ } else if (continueSteps && currentStep.finishReason === "length" && // only use continue when there are no tool calls:
+ currentToolCalls.length === 0) {
+ stepType = "continue";
+ } else if (
+ // there are tool calls:
+ currentToolCalls.length > 0 && // all current tool calls have results:
+ currentToolResults.length === currentToolCalls.length
+ ) {
+ stepType = "tool-result";
+ } else {
+ stepType = "done";
+ }
+ } while (stepType !== "done");
  span.setAttributes(
  selectTelemetryAttributes({
  telemetry,
@@ -3278,10 +3314,7 @@
  })
  );
  return new DefaultGenerateTextResult({
- // Always return a string so that the caller doesn't have to check for undefined.
- // If they need to check if the model did not return any text,
- // they can check the length of the string:
- text: (_d = currentModelResponse.text) != null ? _d : "",
+ text,
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
  finishReason: currentModelResponse.finishReason,
@@ -3289,7 +3322,7 @@
  warnings: currentModelResponse.warnings,
  response: {
  ...currentModelResponse.response,
- headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers
+ headers: (_g = currentModelResponse.rawResponse) == null ? void 0 : _g.headers
  },
  logprobs: currentModelResponse.logprobs,
  responseMessages,
@@ -3735,6 +3768,7 @@ async function streamText({
  headers,
  maxToolRoundtrips = 0,
  maxSteps = maxToolRoundtrips != null ? maxToolRoundtrips + 1 : 1,
+ experimental_continueSteps: continueSteps = false,
  experimental_telemetry: telemetry,
  experimental_providerMetadata: providerMetadata,
  experimental_toolCallStreaming: toolCallStreaming = false,
@@ -3879,6 +3913,7 @@
  telemetry,
  startTimestampMs,
  maxSteps,
+ continueSteps,
  startStep,
  promptMessages,
  modelId: model.modelId,
@@ -3902,6 +3937,7 @@ var DefaultStreamTextResult = class {
  telemetry,
  startTimestampMs,
  maxSteps,
+ continueSteps,
  startStep,
  promptMessages,
  modelId,
@@ -3953,7 +3989,9 @@
  promptTokens: 0,
  completionTokens: 0,
  totalTokens: 0
- }
+ },
+ stepType,
+ previousStepText = ""
  }) {
  const stepToolCalls = [];
  const stepToolResults = [];
@@ -3966,6 +4004,7 @@
  let stepProviderMetadata;
  let stepFirstChunk = true;
  let stepText = "";
+ let fullStepText = stepType === "continue" ? previousStepText : "";
  let stepLogProbs;
  let stepResponse = {
  id: generateId3(),
@@ -3999,6 +4038,7 @@
  case "text-delta": {
  controller.enqueue(chunk);
  stepText += chunk.textDelta;
+ fullStepText += chunk.textDelta;
  await (onChunk == null ? void 0 : onChunk({ chunk }));
  break;
  }
@@ -4098,6 +4138,7 @@
  response: stepResponse
  });
  const stepResult = {
+ stepType,
  text: stepText,
  toolCalls: stepToolCalls,
  toolResults: stepToolResults,
@@ -4116,21 +4157,37 @@
  completionTokens: usage.completionTokens + stepUsage.completionTokens,
  totalTokens: usage.totalTokens + stepUsage.totalTokens
  };
- if (
- // there are tool calls:
- stepToolCalls.length > 0 && // all current tool calls have results:
- stepToolResults.length === stepToolCalls.length && // the number of steps is less than the maximum:
- currentStep + 1 < maxSteps
- ) {
- promptMessages2.push(
- ...toResponseMessages({
+ let nextStepType = "done";
+ if (currentStep + 1 < maxSteps) {
+ if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
+ stepToolCalls.length === 0) {
+ nextStepType = "continue";
+ } else if (
+ // there are tool calls:
+ stepToolCalls.length > 0 && // all current tool calls have results:
+ stepToolResults.length === stepToolCalls.length
+ ) {
+ nextStepType = "tool-result";
+ }
+ }
+ if (nextStepType !== "done") {
+ if (stepType === "continue") {
+ const lastPromptMessage = promptMessages2[promptMessages2.length - 1];
+ lastPromptMessage.content.push({
  text: stepText,
- toolCalls: stepToolCalls,
- toolResults: stepToolResults
- }).map(
- (message) => convertToLanguageModelMessage(message, null)
- )
- );
+ type: "text"
+ });
+ } else {
+ promptMessages2.push(
+ ...toResponseMessages({
+ text: stepText,
+ toolCalls: stepToolCalls,
+ toolResults: stepToolResults
+ }).map(
+ (message) => convertToLanguageModelMessage(message, null)
+ )
+ );
+ }
  const {
  result,
  doStreamSpan: doStreamSpan3,
@@ -4147,7 +4204,9 @@
  doStreamSpan: doStreamSpan3,
  currentStep: currentStep + 1,
  promptMessages: promptMessages2,
- usage: combinedUsage
+ usage: combinedUsage,
+ stepType: nextStepType,
+ previousStepText: fullStepText
  });
  return;
  }
@@ -4166,7 +4225,7 @@
  telemetry,
  attributes: {
  "ai.response.finishReason": stepFinishReason,
- "ai.response.text": { output: () => stepText },
+ "ai.response.text": { output: () => fullStepText },
  "ai.response.toolCalls": {
  output: () => stepToolCallsJson
  },
@@ -4174,27 +4233,38 @@
  "ai.usage.completionTokens": combinedUsage.completionTokens,
  // deprecated
  "ai.finishReason": stepFinishReason,
- "ai.result.text": { output: () => stepText },
+ "ai.result.text": { output: () => fullStepText },
  "ai.result.toolCalls": {
  output: () => stepToolCallsJson
  }
  }
  })
  );
- const responseMessages = stepResults.reduce(
- (responseMessages2, step) => [
+ const responseMessages = stepResults.reduce((responseMessages2, step) => {
+ if (step.stepType === "continue") {
+ const lastResponseMessage = responseMessages2.pop();
+ if (typeof lastResponseMessage.content === "string") {
+ lastResponseMessage.content += step.text;
+ } else {
+ lastResponseMessage.content.push({
+ text: step.text,
+ type: "text"
+ });
+ }
+ return [...responseMessages2, lastResponseMessage];
+ }
+ return [
  ...responseMessages2,
  ...toResponseMessages({
  text: step.text,
  toolCalls: step.toolCalls,
  toolResults: step.toolResults
  })
- ],
- []
- );
+ ];
+ }, []);
  resolveUsage(combinedUsage);
  resolveFinishReason(stepFinishReason);
- resolveText(stepText);
+ resolveText(fullStepText);
  resolveToolCalls(stepToolCalls);
  resolveProviderMetadata(stepProviderMetadata);
  resolveToolResults(stepToolResults);
@@ -4208,7 +4278,7 @@
  finishReason: stepFinishReason,
  logprobs: stepLogProbs,
  usage: combinedUsage,
- text: stepText,
+ text: fullStepText,
  toolCalls: stepToolCalls,
  // The tool results are inferred as a never[] type, because they are
  // optional and the execute method with an inferred result type is
@@ -4241,7 +4311,8 @@
  doStreamSpan,
  currentStep: 0,
  promptMessages,
- usage: void 0
+ usage: void 0,
+ stepType: "initial"
  });
  }
  /**
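
Both code paths above apply the same step-transition rule: stop at `maxSteps`, prefer a "continue" step when the current step hit the output-token limit without making tool calls, and otherwise take a "tool-result" step when every tool call has a result. A standalone restatement of that rule as a pure function — the function name, option bag, and types are illustrative, but the logic mirrors the `stepType` / `nextStepType` branches in the diff:

```ts
// The step-transition rule shared by generateText and streamText in 3.4.3,
// restated for clarity (not part of the package's public API).
type StepType = 'initial' | 'continue' | 'tool-result' | 'done';

function nextStepType(opts: {
  stepCount: number;      // steps completed so far, including the current one
  maxSteps: number;
  continueSteps: boolean; // experimental_continueSteps
  finishReason: string;   // e.g. 'length', 'stop', 'tool-calls'
  toolCallCount: number;
  toolResultCount: number;
}): StepType {
  if (opts.stepCount >= opts.maxSteps) {
    return 'done'; // step budget exhausted
  }
  // Continuation only applies to plain text cut off by the output-token
  // limit; steps that made tool calls are never continued.
  if (opts.continueSteps && opts.finishReason === 'length' && opts.toolCallCount === 0) {
    return 'continue';
  }
  // Another step is warranted when every tool call produced a result.
  if (opts.toolCallCount > 0 && opts.toolResultCount === opts.toolCallCount) {
    return 'tool-result';
  }
  return 'done';
}
```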