ai 3.4.2 → 3.4.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,12 @@
  # ai
 
+ ## 3.4.3
+
+ ### Patch Changes
+
+ - b270ae3: feat (ai/core): streamText continueSteps (experimental)
+ - b270ae3: chore (ai/core): rename generateText continuationSteps to continueSteps
+
  ## 3.4.2
 
  ### Patch Changes
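
In practice, the rename means callers move from the deprecated experimental_continuationSteps flag to experimental_continueSteps. A minimal usage sketch against the 3.4.3 API (the provider, model id, and prompt are illustrative assumptions, not part of this diff):

    import { generateText } from 'ai';
    import { openai } from '@ai-sdk/openai'; // illustrative provider choice

    const { text, steps } = await generateText({
      model: openai('gpt-4o'), // assumed model id for the example
      maxSteps: 5,
      // renamed in 3.4.3; experimental_continuationSteps is now deprecated:
      experimental_continueSteps: true,
      prompt: 'Write a long story about a lighthouse keeper.',
    });
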
package/dist/index.d.mts CHANGED
@@ -1362,6 +1362,12 @@ type StepResult<TOOLS extends Record<string, CoreTool>> = {
  results that can be fully encapsulated in the provider.
  */
  readonly experimental_providerMetadata: ProviderMetadata | undefined;
+ /**
+ The type of step that this result is for. The first step is always
+ an "initial" step, and subsequent steps are either "continue" steps
+ or "tool-result" steps.
+ */
+ readonly stepType: 'initial' | 'continue' | 'tool-result';
  };
 
  /**
@@ -1491,7 +1497,7 @@ If set and supported by the model, calls will generate deterministic results.
  @returns
  A result object that contains the generated text, the results of the tool calls, and additional information.
  */
- declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, maxSteps, experimental_continuationSteps: continuationSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
+ declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, maxSteps, experimental_continuationSteps, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1531,9 +1537,18 @@ A maximum number is required to prevent infinite loops in the case of misconfigu
  By default, it's set to 1, which means that only a single LLM call is made.
  */
  maxSteps?: number;
+ /**
+ @deprecated Use `experimental_continueSteps` instead.
+ */
  experimental_continuationSteps?: boolean;
  /**
- * Optional telemetry configuration (experimental).
+ When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
+
+ By default, it's set to false.
+ */
+ experimental_continueSteps?: boolean;
+ /**
+ Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
  /**
@@ -1832,7 +1847,7 @@ If set and supported by the model, calls will generate deterministic results.
  @return
  A result object for accessing different stream types and additional information.
  */
- declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+ declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1869,6 +1884,12 @@ By default, it's set to 1, which means that only a single LLM call is made.
  */
  maxSteps?: number;
  /**
+ When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
+
+ By default, it's set to false.
+ */
+ experimental_continueSteps?: boolean;
+ /**
  Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
@@ -1896,7 +1917,7 @@ Callback that is called when the LLM response and all request tool executions
 
  The usage is the combined usage of all steps.
  */
- onFinish?: (event: StepResult<TOOLS> & {
+ onFinish?: (event: Omit<StepResult<TOOLS>, 'stepType'> & {
  /**
  Details for all steps.
  */
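
The new stepType field is exposed on each StepResult (for example, in onStepFinish), while the streamText onFinish event now omits it, since the final event aggregates all steps. A sketch of observing it, under the same illustrative setup as above:

    const result = await generateText({
      model: openai('gpt-4o'), // illustrative
      maxSteps: 5,
      experimental_continueSteps: true,
      prompt: 'Write a long story about a lighthouse keeper.',
      onStepFinish(step) {
        // 'initial' for the first step, then 'continue' (length-based
        // continuation) or 'tool-result' (a step after tool execution):
        console.log(step.stepType, step.finishReason);
      },
    });
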
package/dist/index.d.ts CHANGED
@@ -1362,6 +1362,12 @@ type StepResult<TOOLS extends Record<string, CoreTool>> = {
  results that can be fully encapsulated in the provider.
  */
  readonly experimental_providerMetadata: ProviderMetadata | undefined;
+ /**
+ The type of step that this result is for. The first step is always
+ an "initial" step, and subsequent steps are either "continue" steps
+ or "tool-result" steps.
+ */
+ readonly stepType: 'initial' | 'continue' | 'tool-result';
  };
 
  /**
@@ -1491,7 +1497,7 @@ If set and supported by the model, calls will generate deterministic results.
  @returns
  A result object that contains the generated text, the results of the tool calls, and additional information.
  */
- declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, maxSteps, experimental_continuationSteps: continuationSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
+ declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, maxSteps, experimental_continuationSteps, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1531,9 +1537,18 @@ A maximum number is required to prevent infinite loops in the case of misconfigu
  By default, it's set to 1, which means that only a single LLM call is made.
  */
  maxSteps?: number;
+ /**
+ @deprecated Use `experimental_continueSteps` instead.
+ */
  experimental_continuationSteps?: boolean;
  /**
- * Optional telemetry configuration (experimental).
+ When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
+
+ By default, it's set to false.
+ */
+ experimental_continueSteps?: boolean;
+ /**
+ Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
  /**
@@ -1832,7 +1847,7 @@ If set and supported by the model, calls will generate deterministic results.
  @return
  A result object for accessing different stream types and additional information.
  */
- declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+ declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1869,6 +1884,12 @@ By default, it's set to 1, which means that only a single LLM call is made.
  */
  maxSteps?: number;
  /**
+ When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
+
+ By default, it's set to false.
+ */
+ experimental_continueSteps?: boolean;
+ /**
  Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
@@ -1896,7 +1917,7 @@ Callback that is called when the LLM response and all request tool executions
 
  The usage is the combined usage of all steps.
  */
- onFinish?: (event: StepResult<TOOLS> & {
+ onFinish?: (event: Omit<StepResult<TOOLS>, 'stepType'> & {
  /**
  Details for all steps.
  */
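
Per the changelog entry above, streamText gains the same experimental flag in 3.4.3. A hedged sketch of enabling it on a stream (model and prompt again illustrative):

    import { streamText } from 'ai';

    const { textStream } = await streamText({
      model: openai('gpt-4o'), // illustrative
      maxSteps: 3,
      experimental_continueSteps: true, // new in 3.4.3
      prompt: 'Write a very long essay about tides.',
    });

    for await (const delta of textStream) {
      process.stdout.write(delta);
    }
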
package/dist/index.js CHANGED
@@ -3052,7 +3052,8 @@ async function generateText({
  maxAutomaticRoundtrips = 0,
  maxToolRoundtrips = maxAutomaticRoundtrips,
  maxSteps = maxToolRoundtrips != null ? maxToolRoundtrips + 1 : 1,
- experimental_continuationSteps: continuationSteps = false,
+ experimental_continuationSteps,
+ experimental_continueSteps: continueSteps = experimental_continuationSteps != null ? experimental_continuationSteps : false,
  experimental_telemetry: telemetry,
  experimental_providerMetadata: providerMetadata,
  _internal: {
@@ -3096,7 +3097,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
- var _a12, _b, _c, _d, _e, _f;
+ var _a12, _b, _c, _d, _e, _f, _g;
  const retry = retryWithExponentialBackoff({ maxRetries });
  const validatedPrompt = validatePrompt({
  system,
@@ -3223,12 +3224,13 @@ async function generateText({
  usage.completionTokens += currentUsage.completionTokens;
  usage.promptTokens += currentUsage.promptTokens;
  usage.totalTokens += currentUsage.totalTokens;
- if (stepType === "continuation") {
- text += " " + ((_b = currentModelResponse.text) != null ? _b : "");
+ if (stepType === "continue") {
+ text += (_b = currentModelResponse.text) != null ? _b : "";
  } else {
  text = (_c = currentModelResponse.text) != null ? _c : "";
  }
  const currentStep = {
+ stepType,
  text: (_d = currentModelResponse.text) != null ? _d : "",
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
@@ -3244,14 +3246,14 @@ async function generateText({
  };
  steps.push(currentStep);
  await (onStepFinish == null ? void 0 : onStepFinish(currentStep));
- if (stepType === "continuation") {
+ if (stepType === "continue") {
  const lastResponseMessage = responseMessages.pop();
  promptMessages.pop();
  if (typeof lastResponseMessage.content === "string") {
  lastResponseMessage.content = text;
  } else {
  lastResponseMessage.content.push({
- text: " " + currentModelResponse.text,
+ text: (_f = currentModelResponse.text) != null ? _f : "",
  type: "text"
  });
  }
@@ -3274,9 +3276,9 @@ async function generateText({
  }
  if (++stepCount >= maxSteps) {
  stepType = "done";
- } else if (continuationSteps === true && currentStep.finishReason === "length" && // only use continuation when there are no tool calls:
+ } else if (continueSteps && currentStep.finishReason === "length" && // only use continue when there are no tool calls:
  currentToolCalls.length === 0) {
- stepType = "continuation";
+ stepType = "continue";
  } else if (
  // there are tool calls:
  currentToolCalls.length > 0 && // all current tool calls have results:
@@ -3320,7 +3322,7 @@ async function generateText({
  warnings: currentModelResponse.warnings,
  response: {
  ...currentModelResponse.response,
- headers: (_f = currentModelResponse.rawResponse) == null ? void 0 : _f.headers
+ headers: (_g = currentModelResponse.rawResponse) == null ? void 0 : _g.headers
  },
  logprobs: currentModelResponse.logprobs,
  responseMessages,
@@ -3766,6 +3768,7 @@ async function streamText({
  headers,
  maxToolRoundtrips = 0,
  maxSteps = maxToolRoundtrips != null ? maxToolRoundtrips + 1 : 1,
+ experimental_continueSteps: continueSteps = false,
  experimental_telemetry: telemetry,
  experimental_providerMetadata: providerMetadata,
  experimental_toolCallStreaming: toolCallStreaming = false,
@@ -3910,6 +3913,7 @@ async function streamText({
  telemetry,
  startTimestampMs,
  maxSteps,
+ continueSteps,
  startStep,
  promptMessages,
  modelId: model.modelId,
@@ -3933,6 +3937,7 @@ var DefaultStreamTextResult = class {
  telemetry,
  startTimestampMs,
  maxSteps,
+ continueSteps,
  startStep,
  promptMessages,
  modelId,
@@ -3984,7 +3989,9 @@ var DefaultStreamTextResult = class {
  promptTokens: 0,
  completionTokens: 0,
  totalTokens: 0
- }
+ },
+ stepType,
+ previousStepText = ""
  }) {
  const stepToolCalls = [];
  const stepToolResults = [];
@@ -3997,6 +4004,7 @@ var DefaultStreamTextResult = class {
  let stepProviderMetadata;
  let stepFirstChunk = true;
  let stepText = "";
+ let fullStepText = stepType === "continue" ? previousStepText : "";
  let stepLogProbs;
  let stepResponse = {
  id: generateId3(),
@@ -4030,6 +4038,7 @@ var DefaultStreamTextResult = class {
  case "text-delta": {
  controller.enqueue(chunk);
  stepText += chunk.textDelta;
+ fullStepText += chunk.textDelta;
  await (onChunk == null ? void 0 : onChunk({ chunk }));
  break;
  }
@@ -4129,6 +4138,7 @@ var DefaultStreamTextResult = class {
  response: stepResponse
  });
  const stepResult = {
+ stepType,
  text: stepText,
  toolCalls: stepToolCalls,
  toolResults: stepToolResults,
@@ -4147,21 +4157,37 @@ var DefaultStreamTextResult = class {
  completionTokens: usage.completionTokens + stepUsage.completionTokens,
  totalTokens: usage.totalTokens + stepUsage.totalTokens
  };
- if (
- // there are tool calls:
- stepToolCalls.length > 0 && // all current tool calls have results:
- stepToolResults.length === stepToolCalls.length && // the number of steps is less than the maximum:
- currentStep + 1 < maxSteps
- ) {
- promptMessages2.push(
- ...toResponseMessages({
+ let nextStepType = "done";
+ if (currentStep + 1 < maxSteps) {
+ if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
+ stepToolCalls.length === 0) {
+ nextStepType = "continue";
+ } else if (
+ // there are tool calls:
+ stepToolCalls.length > 0 && // all current tool calls have results:
+ stepToolResults.length === stepToolCalls.length
+ ) {
+ nextStepType = "tool-result";
+ }
+ }
+ if (nextStepType !== "done") {
+ if (stepType === "continue") {
+ const lastPromptMessage = promptMessages2[promptMessages2.length - 1];
+ lastPromptMessage.content.push({
  text: stepText,
- toolCalls: stepToolCalls,
- toolResults: stepToolResults
- }).map(
- (message) => convertToLanguageModelMessage(message, null)
- )
- );
+ type: "text"
+ });
+ } else {
+ promptMessages2.push(
+ ...toResponseMessages({
+ text: stepText,
+ toolCalls: stepToolCalls,
+ toolResults: stepToolResults
+ }).map(
+ (message) => convertToLanguageModelMessage(message, null)
+ )
+ );
+ }
  const {
  result,
  doStreamSpan: doStreamSpan3,
@@ -4178,7 +4204,9 @@ var DefaultStreamTextResult = class {
  doStreamSpan: doStreamSpan3,
  currentStep: currentStep + 1,
  promptMessages: promptMessages2,
- usage: combinedUsage
+ usage: combinedUsage,
+ stepType: nextStepType,
+ previousStepText: fullStepText
  });
  return;
  }
@@ -4197,7 +4225,7 @@ var DefaultStreamTextResult = class {
  telemetry,
  attributes: {
  "ai.response.finishReason": stepFinishReason,
- "ai.response.text": { output: () => stepText },
+ "ai.response.text": { output: () => fullStepText },
  "ai.response.toolCalls": {
  output: () => stepToolCallsJson
  },
@@ -4205,27 +4233,38 @@ var DefaultStreamTextResult = class {
  "ai.usage.completionTokens": combinedUsage.completionTokens,
  // deprecated
  "ai.finishReason": stepFinishReason,
- "ai.result.text": { output: () => stepText },
+ "ai.result.text": { output: () => fullStepText },
  "ai.result.toolCalls": {
  output: () => stepToolCallsJson
  }
  }
  })
  );
- const responseMessages = stepResults.reduce(
- (responseMessages2, step) => [
+ const responseMessages = stepResults.reduce((responseMessages2, step) => {
+ if (step.stepType === "continue") {
+ const lastResponseMessage = responseMessages2.pop();
+ if (typeof lastResponseMessage.content === "string") {
+ lastResponseMessage.content += step.text;
+ } else {
+ lastResponseMessage.content.push({
+ text: step.text,
+ type: "text"
+ });
+ }
+ return [...responseMessages2, lastResponseMessage];
+ }
+ return [
  ...responseMessages2,
  ...toResponseMessages({
  text: step.text,
  toolCalls: step.toolCalls,
  toolResults: step.toolResults
  })
- ],
- []
- );
+ ];
+ }, []);
  resolveUsage(combinedUsage);
  resolveFinishReason(stepFinishReason);
- resolveText(stepText);
+ resolveText(fullStepText);
  resolveToolCalls(stepToolCalls);
  resolveProviderMetadata(stepProviderMetadata);
  resolveToolResults(stepToolResults);
@@ -4239,7 +4278,7 @@ var DefaultStreamTextResult = class {
  finishReason: stepFinishReason,
  logprobs: stepLogProbs,
  usage: combinedUsage,
- text: stepText,
+ text: fullStepText,
  toolCalls: stepToolCalls,
  // The tool results are inferred as a never[] type, because they are
  // optional and the execute method with an inferred result type is
@@ -4272,7 +4311,8 @@ var DefaultStreamTextResult = class {
  doStreamSpan,
  currentStep: 0,
  promptMessages,
- usage: void 0
+ usage: void 0,
+ stepType: "initial"
  });
  }
  /**
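
One behavioral detail worth noting from the implementation changes above: continuation text is now appended verbatim (3.4.2 injected a leading space before each continuation), and the resolved text is the concatenation across all continue steps (fullStepText). A toy illustration of why the injected space was a problem, assuming two hypothetical length-limited steps that split a word:

    // Hypothetical step texts from two length-limited generations:
    const stepTexts = ['The quick brown fox jumps over the la', 'zy dog.'];

    // 3.4.2 joined continue steps with ' ', yielding '...the la zy dog.'
    // 3.4.3 concatenates verbatim, so words split across steps rejoin:
    const fullText = stepTexts.join('');
    console.log(fullText); // 'The quick brown fox jumps over the lazy dog.'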