ai 3.4.2 → 3.4.3

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
package/dist/index.mjs CHANGED
@@ -2990,7 +2990,8 @@ async function generateText({
  maxAutomaticRoundtrips = 0,
  maxToolRoundtrips = maxAutomaticRoundtrips,
  maxSteps = maxToolRoundtrips != null ? maxToolRoundtrips + 1 : 1,
- experimental_continuationSteps: continuationSteps = false,
+ experimental_continuationSteps,
+ experimental_continueSteps: continueSteps = experimental_continuationSteps != null ? experimental_continuationSteps : false,
  experimental_telemetry: telemetry,
  experimental_providerMetadata: providerMetadata,
  _internal: {
@@ -3034,7 +3035,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
- var _a12, _b, _c, _d, _e, _f;
+ var _a12, _b, _c, _d, _e, _f, _g;
  const retry = retryWithExponentialBackoff({ maxRetries });
  const validatedPrompt = validatePrompt({
  system,
@@ -3161,12 +3162,13 @@ async function generateText({
  usage.completionTokens += currentUsage.completionTokens;
  usage.promptTokens += currentUsage.promptTokens;
  usage.totalTokens += currentUsage.totalTokens;
- if (stepType === "continuation") {
- text += " " + ((_b = currentModelResponse.text) != null ? _b : "");
+ if (stepType === "continue") {
+ text += (_b = currentModelResponse.text) != null ? _b : "";
  } else {
  text = (_c = currentModelResponse.text) != null ? _c : "";
  }
  const currentStep = {
+ stepType,
  text: (_d = currentModelResponse.text) != null ? _d : "",
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
@@ -3182,14 +3184,14 @@ async function generateText({
  };
  steps.push(currentStep);
  await (onStepFinish == null ? void 0 : onStepFinish(currentStep));
- if (stepType === "continuation") {
+ if (stepType === "continue") {
  const lastResponseMessage = responseMessages.pop();
  promptMessages.pop();
  if (typeof lastResponseMessage.content === "string") {
  lastResponseMessage.content = text;
  } else {
  lastResponseMessage.content.push({
- text: " " + currentModelResponse.text,
+ text: (_f = currentModelResponse.text) != null ? _f : "",
  type: "text"
  });
  }
@@ -3212,9 +3214,9 @@ async function generateText({
  }
  if (++stepCount >= maxSteps) {
  stepType = "done";
- } else if (continuationSteps === true && currentStep.finishReason === "length" && // only use continuation when there are no tool calls:
+ } else if (continueSteps && currentStep.finishReason === "length" && // only use continue when there are no tool calls:
  currentToolCalls.length === 0) {
- stepType = "continuation";
+ stepType = "continue";
  } else if (
  // there are tool calls:
  currentToolCalls.length > 0 && // all current tool calls have results:
@@ -3258,7 +3260,7 @@ async function generateText({
  warnings: currentModelResponse.warnings,
  response: {
  ...currentModelResponse.response,
- headers: (_f = currentModelResponse.rawResponse) == null ? void 0 : _f.headers
+ headers: (_g = currentModelResponse.rawResponse) == null ? void 0 : _g.headers
  },
  logprobs: currentModelResponse.logprobs,
  responseMessages,
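
The generateText hunks above rename the experimental option from experimental_continuationSteps to experimental_continueSteps (the old name is kept as a fallback), rename the internal step type "continuation" to "continue", expose stepType on each step result, and stop injecting a hard-coded " " separator when appending continued text. A minimal usage sketch against 3.4.3 follows; the provider, model id, and prompt are illustrative and not part of the diff:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai"; // illustrative provider choice

const { text, steps } = await generateText({
  model: openai("gpt-4o"), // hypothetical model id for this example
  prompt: "Write a long-form article about package diffing.",
  maxSteps: 5, // upper bound across initial, continue, and tool-result steps
  // Renamed option (was experimental_continuationSteps). When a step
  // finishes with finishReason === "length" and made no tool calls,
  // the next step continues the same assistant message.
  experimental_continueSteps: true,
});

// stepType is newly exposed on each step result in this release
console.log(steps.map((step) => step.stepType));
console.log(text);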
@@ -3704,6 +3706,7 @@ async function streamText({
  headers,
  maxToolRoundtrips = 0,
  maxSteps = maxToolRoundtrips != null ? maxToolRoundtrips + 1 : 1,
+ experimental_continueSteps: continueSteps = false,
  experimental_telemetry: telemetry,
  experimental_providerMetadata: providerMetadata,
  experimental_toolCallStreaming: toolCallStreaming = false,
@@ -3848,6 +3851,7 @@ async function streamText({
  telemetry,
  startTimestampMs,
  maxSteps,
+ continueSteps,
  startStep,
  promptMessages,
  modelId: model.modelId,
@@ -3871,6 +3875,7 @@ var DefaultStreamTextResult = class {
  telemetry,
  startTimestampMs,
  maxSteps,
+ continueSteps,
  startStep,
  promptMessages,
  modelId,
@@ -3922,7 +3927,9 @@ var DefaultStreamTextResult = class {
  promptTokens: 0,
  completionTokens: 0,
  totalTokens: 0
- }
+ },
+ stepType,
+ previousStepText = ""
  }) {
  const stepToolCalls = [];
  const stepToolResults = [];
@@ -3935,6 +3942,7 @@ var DefaultStreamTextResult = class {
  let stepProviderMetadata;
  let stepFirstChunk = true;
  let stepText = "";
+ let fullStepText = stepType === "continue" ? previousStepText : "";
  let stepLogProbs;
  let stepResponse = {
  id: generateId3(),
@@ -3968,6 +3976,7 @@ var DefaultStreamTextResult = class {
  case "text-delta": {
  controller.enqueue(chunk);
  stepText += chunk.textDelta;
+ fullStepText += chunk.textDelta;
  await (onChunk == null ? void 0 : onChunk({ chunk }));
  break;
  }
@@ -4067,6 +4076,7 @@ var DefaultStreamTextResult = class {
  response: stepResponse
  });
  const stepResult = {
+ stepType,
  text: stepText,
  toolCalls: stepToolCalls,
  toolResults: stepToolResults,
@@ -4085,21 +4095,37 @@ var DefaultStreamTextResult = class {
  completionTokens: usage.completionTokens + stepUsage.completionTokens,
  totalTokens: usage.totalTokens + stepUsage.totalTokens
  };
- if (
- // there are tool calls:
- stepToolCalls.length > 0 && // all current tool calls have results:
- stepToolResults.length === stepToolCalls.length && // the number of steps is less than the maximum:
- currentStep + 1 < maxSteps
- ) {
- promptMessages2.push(
- ...toResponseMessages({
+ let nextStepType = "done";
+ if (currentStep + 1 < maxSteps) {
+ if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
+ stepToolCalls.length === 0) {
+ nextStepType = "continue";
+ } else if (
+ // there are tool calls:
+ stepToolCalls.length > 0 && // all current tool calls have results:
+ stepToolResults.length === stepToolCalls.length
+ ) {
+ nextStepType = "tool-result";
+ }
+ }
+ if (nextStepType !== "done") {
+ if (stepType === "continue") {
+ const lastPromptMessage = promptMessages2[promptMessages2.length - 1];
+ lastPromptMessage.content.push({
  text: stepText,
- toolCalls: stepToolCalls,
- toolResults: stepToolResults
- }).map(
- (message) => convertToLanguageModelMessage(message, null)
- )
- );
+ type: "text"
+ });
+ } else {
+ promptMessages2.push(
+ ...toResponseMessages({
+ text: stepText,
+ toolCalls: stepToolCalls,
+ toolResults: stepToolResults
+ }).map(
+ (message) => convertToLanguageModelMessage(message, null)
+ )
+ );
+ }
  const {
  result,
  doStreamSpan: doStreamSpan3,
@@ -4116,7 +4142,9 @@ var DefaultStreamTextResult = class {
  doStreamSpan: doStreamSpan3,
  currentStep: currentStep + 1,
  promptMessages: promptMessages2,
- usage: combinedUsage
+ usage: combinedUsage,
+ stepType: nextStepType,
+ previousStepText: fullStepText
  });
  return;
  }
@@ -4135,7 +4163,7 @@ var DefaultStreamTextResult = class {
  telemetry,
  attributes: {
  "ai.response.finishReason": stepFinishReason,
- "ai.response.text": { output: () => stepText },
+ "ai.response.text": { output: () => fullStepText },
  "ai.response.toolCalls": {
  output: () => stepToolCallsJson
  },
@@ -4143,27 +4171,38 @@ var DefaultStreamTextResult = class {
  "ai.usage.completionTokens": combinedUsage.completionTokens,
  // deprecated
  "ai.finishReason": stepFinishReason,
- "ai.result.text": { output: () => stepText },
+ "ai.result.text": { output: () => fullStepText },
  "ai.result.toolCalls": {
  output: () => stepToolCallsJson
  }
  }
  })
  );
- const responseMessages = stepResults.reduce(
- (responseMessages2, step) => [
+ const responseMessages = stepResults.reduce((responseMessages2, step) => {
+ if (step.stepType === "continue") {
+ const lastResponseMessage = responseMessages2.pop();
+ if (typeof lastResponseMessage.content === "string") {
+ lastResponseMessage.content += step.text;
+ } else {
+ lastResponseMessage.content.push({
+ text: step.text,
+ type: "text"
+ });
+ }
+ return [...responseMessages2, lastResponseMessage];
+ }
+ return [
  ...responseMessages2,
  ...toResponseMessages({
  text: step.text,
  toolCalls: step.toolCalls,
  toolResults: step.toolResults
  })
- ],
- []
- );
+ ];
+ }, []);
  resolveUsage(combinedUsage);
  resolveFinishReason(stepFinishReason);
- resolveText(stepText);
+ resolveText(fullStepText);
  resolveToolCalls(stepToolCalls);
  resolveProviderMetadata(stepProviderMetadata);
  resolveToolResults(stepToolResults);
@@ -4177,7 +4216,7 @@ var DefaultStreamTextResult = class {
  finishReason: stepFinishReason,
  logprobs: stepLogProbs,
  usage: combinedUsage,
- text: stepText,
+ text: fullStepText,
  toolCalls: stepToolCalls,
  // The tool results are inferred as a never[] type, because they are
  // optional and the execute method with an inferred result type is
@@ -4210,7 +4249,8 @@ var DefaultStreamTextResult = class {
  doStreamSpan,
  currentStep: 0,
  promptMessages,
- usage: void 0
+ usage: void 0,
+ stepType: "initial"
  });
  }
  /**
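
streamText gains the same experimental_continueSteps option in this release: the recursive step invocation now threads stepType and previousStepText through, fullStepText accumulates the text across continue steps, and that accumulated text is what reaches telemetry, the merged response messages, and the resolved text promise. A sketch of how a caller might opt in, again with an illustrative provider and model id:

import { streamText } from "ai";
import { openai } from "@ai-sdk/openai"; // illustrative provider choice

const result = await streamText({
  model: openai("gpt-4o"), // hypothetical model id for this example
  prompt: "Write a very long essay.",
  maxSteps: 3,
  experimental_continueSteps: true, // new for streamText in 3.4.3
});

// text-delta chunks from every step arrive on a single stream; the
// resolved text is the accumulated fullStepText across all steps
for await (const textDelta of result.textStream) {
  process.stdout.write(textDelta);
}
console.log(await result.text);

As in generateText, a continue step appends to the last assistant message (see the responseMessages reduce above) rather than starting a new message, so downstream message histories see one coherent assistant turn.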