ai 3.4.3 → 3.4.5

This diff shows the published contents of two public registry releases of the package. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the registry.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,24 @@
  # ai
 
+ ## 3.4.5
+
+ ### Patch Changes
+
+ - cd77c5d: feat (ai/core): add isContinued to steps
+ - Updated dependencies [cd77c5d]
+   - @ai-sdk/ui-utils@0.0.45
+   - @ai-sdk/react@0.0.61
+   - @ai-sdk/solid@0.0.48
+   - @ai-sdk/svelte@0.0.50
+   - @ai-sdk/vue@0.0.52
+
+ ## 3.4.4
+
+ ### Patch Changes
+
+ - 4db074b: fix (ai/core): correct whitespace in generateText continueSteps
+ - 1297e1b: fix (ai/core): correct whitespace in streamText continueSteps
+
  ## 3.4.3
 
  ### Patch Changes
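For orientation before the type and code diffs below: 3.4.5 adds an `isContinued` flag to each step result, and 3.4.4 fixes how whitespace is carried across continuation steps. A minimal consumer-side sketch of the new flag, assuming the continuation feature is enabled via the `experimental_continueSteps` option and that the model and prompt shown are placeholders:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { steps } = await generateText({
  model: openai('gpt-4o'),
  maxSteps: 5,
  experimental_continueSteps: true, // assumed public name of the internal continueSteps flag
  prompt: 'Write a very long essay about recursion.',
  onStepFinish(step) {
    // isContinued (new in 3.4.5) is true when a continuation step
    // carrying more text for the same message will follow.
    console.log(step.stepType, step.isContinued);
  },
});
```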
package/dist/index.d.mts CHANGED
@@ -1368,6 +1368,10 @@ type StepResult<TOOLS extends Record<string, CoreTool>> = {
  or "tool-result" steps.
  */
  readonly stepType: 'initial' | 'continue' | 'tool-result';
+ /**
+ True when there will be a continuation step with a continuation text.
+ */
+ readonly isContinued: boolean;
  };
 
  /**
@@ -1788,6 +1792,7 @@ type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
  usage: LanguageModelUsage$1;
  response: LanguageModelResponseMetadata;
  experimental_providerMetadata?: ProviderMetadata;
+ isContinued: boolean;
  } | {
  type: 'finish';
  finishReason: FinishReason;
@@ -1917,7 +1922,7 @@ Callback that is called when the LLM response and all request tool executions
 
  The usage is the combined usage of all steps.
  */
- onFinish?: (event: Omit<StepResult<TOOLS>, 'stepType'> & {
+ onFinish?: (event: Omit<StepResult<TOOLS>, 'stepType' | 'isContinued'> & {
  /**
  Details for all steps.
  */
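Note the widened `Omit`: the `onFinish` event drops the per-step `isContinued` flag along with `stepType`, since neither is meaningful for the aggregate result; both remain available on the entries of the `steps` array. A type-level illustration (simplified stand-in types, not the library source):

```ts
// Simplified stand-ins for the declarations above:
type Step = {
  stepType: 'initial' | 'continue' | 'tool-result';
  isContinued: boolean;
  text: string;
};

// Before 3.4.5 only stepType was stripped from the onFinish event;
// now both per-step fields are:
type OnFinishEvent = Omit<Step, 'stepType' | 'isContinued'> & {
  steps: Step[]; // per-step data, including isContinued, lives here
};
```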
package/dist/index.d.ts CHANGED
@@ -1368,6 +1368,10 @@ type StepResult<TOOLS extends Record<string, CoreTool>> = {
  or "tool-result" steps.
  */
  readonly stepType: 'initial' | 'continue' | 'tool-result';
+ /**
+ True when there will be a continuation step with a continuation text.
+ */
+ readonly isContinued: boolean;
  };
 
  /**
@@ -1788,6 +1792,7 @@ type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
  usage: LanguageModelUsage$1;
  response: LanguageModelResponseMetadata;
  experimental_providerMetadata?: ProviderMetadata;
+ isContinued: boolean;
  } | {
  type: 'finish';
  finishReason: FinishReason;
@@ -1917,7 +1922,7 @@ Callback that is called when the LLM response and all request tool executions
 
  The usage is the combined usage of all steps.
  */
- onFinish?: (event: Omit<StepResult<TOOLS>, 'stepType'> & {
+ onFinish?: (event: Omit<StepResult<TOOLS>, 'stepType' | 'isContinued'> & {
  /**
  Details for all steps.
  */
package/dist/index.js CHANGED
@@ -2977,6 +2977,19 @@ function prepareToolsAndToolChoice({
  };
  }
 
+ // core/util/split-on-last-whitespace.ts
+ var lastWhitespaceRegexp = /^([\s\S]*?)(\s+)(\S*)$/;
+ function splitOnLastWhitespace(text) {
+   const match = text.match(lastWhitespaceRegexp);
+   return match ? { prefix: match[1], whitespace: match[2], suffix: match[3] } : void 0;
+ }
+
+ // core/util/remove-text-after-last-whitespace.ts
+ function removeTextAfterLastWhitespace(text) {
+   const match = splitOnLastWhitespace(text);
+   return match ? match.prefix + match.whitespace : text;
+ }
+
  // core/generate-text/parse-tool-call.ts
  var import_provider_utils8 = require("@ai-sdk/provider-utils");
  var import_ui_utils4 = require("@ai-sdk/ui-utils");
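These two helpers implement the 3.4.4 whitespace fixes: when a step will be continued, its text is cut back to the last whitespace run so the continuation never starts mid-word. Their expected behavior, illustrated:

```ts
splitOnLastWhitespace('foo bar baz');
// → { prefix: 'foo bar', whitespace: ' ', suffix: 'baz' }

splitOnLastWhitespace('nowhitespace');
// → undefined (the regexp requires at least one \s+ run)

removeTextAfterLastWhitespace('The quick brown fo');
// → 'The quick brown ' (the trailing partial word "fo" is dropped
//    and regenerated by the continuation step)

removeTextAfterLastWhitespace('single');
// → 'single' (no whitespace, so the text is returned unchanged)
```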
@@ -3097,7 +3110,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
-   var _a12, _b, _c, _d, _e, _f, _g;
+   var _a12, _b, _c, _d, _e;
    const retry = retryWithExponentialBackoff({ maxRetries });
    const validatedPrompt = validatePrompt({
      system,
@@ -3157,7 +3170,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span2) => {
-   var _a13, _b2, _c2, _d2, _e2, _f2;
+   var _a13, _b2, _c2, _d2, _e2, _f;
    const result = await model.doGenerate({
      mode,
      ...callSettings,
@@ -3170,7 +3183,7 @@ async function generateText({
  const responseData = {
    id: (_b2 = (_a13 = result.response) == null ? void 0 : _a13.id) != null ? _b2 : generateId3(),
    timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-   modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId
+   modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : model.modelId
  };
  span2.setAttributes(
    selectTelemetryAttributes({
@@ -3224,14 +3237,24 @@ async function generateText({
  usage.completionTokens += currentUsage.completionTokens;
  usage.promptTokens += currentUsage.promptTokens;
  usage.totalTokens += currentUsage.totalTokens;
- if (stepType === "continue") {
-   text += (_b = currentModelResponse.text) != null ? _b : "";
- } else {
-   text = (_c = currentModelResponse.text) != null ? _c : "";
+ let nextStepType = "done";
+ if (++stepCount < maxSteps) {
+   if (continueSteps && currentModelResponse.finishReason === "length" && // only use continue when there are no tool calls:
+   currentToolCalls.length === 0) {
+     nextStepType = "continue";
+   } else if (
+     // there are tool calls:
+     currentToolCalls.length > 0 && // all current tool calls have results:
+     currentToolResults.length === currentToolCalls.length
+   ) {
+     nextStepType = "tool-result";
+   }
  }
+ const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace((_b = currentModelResponse.text) != null ? _b : "") : (_c = currentModelResponse.text) != null ? _c : "";
+ text = nextStepType === "continue" || stepType === "continue" ? text + stepText : stepText;
  const currentStep = {
    stepType,
-   text: (_d = currentModelResponse.text) != null ? _d : "",
+   text: stepText,
    toolCalls: currentToolCalls,
    toolResults: currentToolResults,
    finishReason: currentModelResponse.finishReason,
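The step-type decision now runs before the step text is assembled, because the whitespace trimming must apply only when a continuation step will actually follow. Condensed into a standalone function (an illustrative restatement, not the library source):

```ts
type NextStepType = 'continue' | 'tool-result' | 'done';

function decideNextStep(opts: {
  stepCount: number; // already incremented for the current step
  maxSteps: number;
  continueSteps: boolean;
  finishReason: string;
  toolCalls: number;
  toolResults: number;
}): NextStepType {
  if (opts.stepCount >= opts.maxSteps) return 'done';
  // continue only on a length cutoff with no tool calls:
  if (opts.continueSteps && opts.finishReason === 'length' && opts.toolCalls === 0) {
    return 'continue';
  }
  // tool-result only when every tool call already has a result:
  if (opts.toolCalls > 0 && opts.toolResults === opts.toolCalls) {
    return 'tool-result';
  }
  return 'done';
}
```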
@@ -3240,9 +3263,10 @@ async function generateText({
  logprobs: currentModelResponse.logprobs,
  response: {
    ...currentModelResponse.response,
-   headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers
+   headers: (_d = currentModelResponse.rawResponse) == null ? void 0 : _d.headers
  },
- experimental_providerMetadata: currentModelResponse.providerMetadata
+ experimental_providerMetadata: currentModelResponse.providerMetadata,
+ isContinued: nextStepType === "continue"
  };
  steps.push(currentStep);
  await (onStepFinish == null ? void 0 : onStepFinish(currentStep));
@@ -3253,7 +3277,7 @@ async function generateText({
    lastResponseMessage.content = text;
  } else {
    lastResponseMessage.content.push({
-     text: (_f = currentModelResponse.text) != null ? _f : "",
+     text: stepText,
      type: "text"
    });
  }
@@ -3261,6 +3285,18 @@ async function generateText({
  promptMessages.push(
    convertToLanguageModelMessage(lastResponseMessage, null)
  );
+ } else if (nextStepType === "continue") {
+   const newResponseMessages = toResponseMessages({
+     text,
+     toolCalls: currentToolCalls,
+     toolResults: currentToolResults
+   });
+   responseMessages.push(...newResponseMessages);
+   promptMessages.push(
+     ...newResponseMessages.map(
+       (message) => convertToLanguageModelMessage(message, null)
+     )
+   );
  } else {
    const newResponseMessages = toResponseMessages({
      text: currentModelResponse.text,
@@ -3274,20 +3310,7 @@ async function generateText({
    )
  );
  }
- if (++stepCount >= maxSteps) {
-   stepType = "done";
- } else if (continueSteps && currentStep.finishReason === "length" && // only use continue when there are no tool calls:
- currentToolCalls.length === 0) {
-   stepType = "continue";
- } else if (
-   // there are tool calls:
-   currentToolCalls.length > 0 && // all current tool calls have results:
-   currentToolResults.length === currentToolCalls.length
- ) {
-   stepType = "tool-result";
- } else {
-   stepType = "done";
- }
+ stepType = nextStepType;
  } while (stepType !== "done");
  span.setAttributes(
    selectTelemetryAttributes({
@@ -3322,7 +3345,7 @@ async function generateText({
  warnings: currentModelResponse.warnings,
  response: {
    ...currentModelResponse.response,
-   headers: (_g = currentModelResponse.rawResponse) == null ? void 0 : _g.headers
+   headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers
  },
  logprobs: currentModelResponse.logprobs,
  responseMessages,
@@ -4011,6 +4034,18 @@ var DefaultStreamTextResult = class {
  timestamp: currentDate(),
  modelId
  };
+ let chunkBuffer = "";
+ let chunkTextPublished = false;
+ async function publishTextChunk({
+   controller,
+   chunk
+ }) {
+   controller.enqueue(chunk);
+   stepText += chunk.textDelta;
+   fullStepText += chunk.textDelta;
+   chunkTextPublished = true;
+   await (onChunk == null ? void 0 : onChunk({ chunk }));
+ }
  addStream(
    stream2.pipeThrough(
      new TransformStream({
@@ -4036,10 +4071,22 @@ var DefaultStreamTextResult = class {
  const chunkType = chunk.type;
  switch (chunkType) {
    case "text-delta": {
-     controller.enqueue(chunk);
-     stepText += chunk.textDelta;
-     fullStepText += chunk.textDelta;
-     await (onChunk == null ? void 0 : onChunk({ chunk }));
+     if (continueSteps) {
+       chunkBuffer += chunk.textDelta;
+       const split = splitOnLastWhitespace(chunkBuffer);
+       if (split != null) {
+         chunkBuffer = split.suffix;
+         await publishTextChunk({
+           controller,
+           chunk: {
+             type: "text-delta",
+             textDelta: split.prefix + split.whitespace
+           }
+         });
+       }
+     } else {
+       await publishTextChunk({ controller, chunk });
+     }
      break;
    }
    case "tool-call": {
@@ -4095,6 +4142,30 @@ var DefaultStreamTextResult = class {
  // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
  async flush(controller) {
    const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
+   let nextStepType = "done";
+   if (currentStep + 1 < maxSteps) {
+     if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
+     stepToolCalls.length === 0) {
+       nextStepType = "continue";
+     } else if (
+       // there are tool calls:
+       stepToolCalls.length > 0 && // all current tool calls have results:
+       stepToolResults.length === stepToolCalls.length
+     ) {
+       nextStepType = "tool-result";
+     }
+   }
+   if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
+   stepType === "continue" && !chunkTextPublished)) {
+     await publishTextChunk({
+       controller,
+       chunk: {
+         type: "text-delta",
+         textDelta: chunkBuffer
+       }
+     });
+     chunkBuffer = "";
+   }
    try {
      doStreamSpan2.setAttributes(
        selectTelemetryAttributes({
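The buffer is flushed in `flush` only when no continuation follows, or when the current step is itself a continuation that published nothing (so its entire output is the buffered tail and would otherwise be lost). The condition, restated as a predicate (illustrative only):

```ts
function shouldFlushChunkBuffer(opts: {
  continueSteps: boolean;
  bufferLength: number;
  nextStepType: 'continue' | 'tool-result' | 'done';
  stepType: string;
  chunkTextPublished: boolean;
}): boolean {
  return (
    opts.continueSteps &&
    opts.bufferLength > 0 &&
    // flush when this is the last text step, or when a continuation step
    // produced nothing but the buffered tail:
    (opts.nextStepType !== 'continue' ||
      (opts.stepType === 'continue' && !opts.chunkTextPublished))
  );
}
```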
@@ -4135,7 +4206,8 @@ var DefaultStreamTextResult = class {
  usage: stepUsage,
  experimental_providerMetadata: stepProviderMetadata,
  logprobs: stepLogProbs,
- response: stepResponse
+ response: stepResponse,
+ isContinued: nextStepType === "continue"
  });
  const stepResult = {
    stepType,
@@ -4148,7 +4220,8 @@ var DefaultStreamTextResult = class {
  logprobs: stepLogProbs,
  response: stepResponse,
  rawResponse: self.rawResponse,
- experimental_providerMetadata: stepProviderMetadata
+ experimental_providerMetadata: stepProviderMetadata,
+ isContinued: nextStepType === "continue"
  };
  stepResults.push(stepResult);
  await (onStepFinish == null ? void 0 : onStepFinish(stepResult));
@@ -4157,19 +4230,6 @@ var DefaultStreamTextResult = class {
  completionTokens: usage.completionTokens + stepUsage.completionTokens,
  totalTokens: usage.totalTokens + stepUsage.totalTokens
  };
- let nextStepType = "done";
- if (currentStep + 1 < maxSteps) {
-   if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
-   stepToolCalls.length === 0) {
-     nextStepType = "continue";
-   } else if (
-     // there are tool calls:
-     stepToolCalls.length > 0 && // all current tool calls have results:
-     stepToolResults.length === stepToolCalls.length
-   ) {
-     nextStepType = "tool-result";
-   }
- }
  if (nextStepType !== "done") {
    if (stepType === "continue") {
      const lastPromptMessage = promptMessages2[promptMessages2.length - 1];
@@ -4437,7 +4497,8 @@ var DefaultStreamTextResult = class {
  usage: sendUsage ? {
    promptTokens: chunk.usage.promptTokens,
    completionTokens: chunk.usage.completionTokens
- } : void 0
+ } : void 0,
+ isContinued: chunk.isContinued
  })
  );
  break;
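Finally, the data-stream protocol forwards the flag: the step-boundary part sent to UI clients now carries `isContinued`, letting consumers distinguish "more text for this message is coming" from a real finish. A consumer-side sketch, assuming the step boundary surfaces as a `'step-finish'` part on `fullStream` (as the TextStreamPart union above suggests) and that `experimental_continueSteps` is the public option name:

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await streamText({
  model: openai('gpt-4o'),
  maxSteps: 5,
  experimental_continueSteps: true, // assumed public option name
  prompt: 'Write a very long story.',
});

for await (const part of result.fullStream) {
  if (part.type === 'step-finish') {
    // New in 3.4.5: true when a continuation step will follow.
    console.log('step finished, continues:', part.isContinued);
  }
}
```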