ai 4.0.0 → 4.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -3291,15 +3291,16 @@ async function generateText({
       };
       let stepType = "initial";
       do {
-        if (stepCount === 1) {
-          initialPrompt.type = "messages";
-        }
         const promptFormat = stepCount === 0 ? initialPrompt.type : "messages";
+        const stepInputMessages = [
+          ...initialPrompt.messages,
+          ...responseMessages
+        ];
         const promptMessages = await convertToLanguageModelPrompt({
           prompt: {
             type: promptFormat,
             system: initialPrompt.system,
-            messages: [...initialPrompt.messages, ...responseMessages]
+            messages: stepInputMessages
           },
           modelSupportsImageUrls: model.supportsImageUrls,
           modelSupportsUrl: model.supportsUrl
@@ -3395,6 +3396,7 @@ async function generateText({
           tools,
           tracer,
           telemetry,
+          messages: stepInputMessages,
           abortSignal
         });
         const currentUsage = calculateLanguageModelUsage(
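
The two hunks above refactor generateText so that the per-step input, the initial prompt messages plus the response messages accumulated from earlier steps, is built once as stepInputMessages and then reused both for the model prompt and for the call into executeTools. A conceptual TypeScript sketch of that assembly follows; the Message type and helper name are illustrative only and do not exist in the package.

```ts
// Conceptual sketch of the stepInputMessages refactor above (not library code).
type Message = { role: "system" | "user" | "assistant" | "tool"; content: unknown };

function buildStepInputMessages(
  initialPromptMessages: Message[], // messages from the original call
  responseMessages: Message[]       // assistant/tool messages produced by earlier steps
): Message[] {
  // Same expression the diff factors out: initial prompt first, then step output.
  return [...initialPromptMessages, ...responseMessages];
}
```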
@@ -3505,6 +3507,7 @@ async function executeTools({
   tools,
   tracer,
   telemetry,
+  messages,
   abortSignal
 }) {
   const toolResults = await Promise.all(
@@ -3531,7 +3534,10 @@ async function executeTools({
         }),
         tracer,
         fn: async (span) => {
-          const result2 = await tool2.execute(toolCall.args, { abortSignal });
+          const result2 = await tool2.execute(toolCall.args, {
+            messages,
+            abortSignal
+          });
           try {
             span.setAttributes(
               selectTelemetryAttributes({
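
With executeTools now receiving and forwarding messages, a tool's execute callback can inspect the conversation for the current step. A hedged usage sketch, assuming the accompanying type update ships in the same release: the tool name and schema are made up, and only the messages and abortSignal options appear in this diff.

```ts
import { tool } from "ai";
import { z } from "zod";

// Hypothetical tool: its execute callback reads the step input messages that
// generateText/executeTools now pass in the second argument.
const summarizeTool = tool({
  description: "Summarize the conversation so far",
  parameters: z.object({ maxWords: z.number() }),
  execute: async ({ maxWords }, { messages, abortSignal }) => {
    // `messages` is the stepInputMessages array from the hunks above:
    // initial prompt messages followed by earlier steps' response messages.
    return { turns: messages.length, maxWords, aborted: abortSignal?.aborted ?? false };
  },
});
```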
@@ -3676,6 +3682,7 @@ function runToolsTransformation({
   toolCallStreaming,
   tracer,
   telemetry,
+  messages,
   abortSignal
 }) {
   let toolResultsStreamController = null;
@@ -3771,7 +3778,10 @@ function runToolsTransformation({
             }
           }),
           tracer,
-          fn: async (span) => tool2.execute(toolCall.args, { abortSignal }).then(
+          fn: async (span) => tool2.execute(toolCall.args, {
+            messages,
+            abortSignal
+          }).then(
            (result) => {
              toolResultsStreamController.enqueue({
                ...toolCall,
@@ -3997,11 +4007,15 @@ var DefaultStreamTextResult = class {
       hasLeadingWhitespace
     }) {
       const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
+      const stepInputMessages = [
+        ...initialPrompt.messages,
+        ...responseMessages
+      ];
       const promptMessages = await convertToLanguageModelPrompt({
         prompt: {
           type: promptFormat,
           system: initialPrompt.system,
-          messages: [...initialPrompt.messages, ...responseMessages]
+          messages: stepInputMessages
         },
         modelSupportsImageUrls: model.supportsImageUrls,
         modelSupportsUrl: model.supportsUrl
@@ -4077,6 +4091,7 @@ var DefaultStreamTextResult = class {
        toolCallStreaming,
        tracer,
        telemetry,
+       messages: stepInputMessages,
        abortSignal
      });
      const stepRequest = request != null ? request : {};
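
The last four hunks give the streaming path the same behavior: the streamText step builder computes stepInputMessages and hands it to runToolsTransformation, which now forwards it to tool2.execute. A sketch of the streaming side, reusing the hypothetical summarizeTool from the previous example; the provider and model choice are assumptions, not part of this diff.

```ts
import { streamText } from "ai";
import { openai } from "@ai-sdk/openai"; // any provider works; OpenAI is only an example

const result = streamText({
  model: openai("gpt-4o-mini"),
  tools: { summarize: summarizeTool },   // execute receives { messages, abortSignal }
  maxSteps: 2,                           // allow a follow-up step after the tool result
  prompt: "Summarize our conversation in twenty words.",
});

for await (const part of result.fullStream) {
  // Tool calls run inside runToolsTransformation, where execute is invoked
  // with the step input messages as shown in the hunks above.
  if (part.type === "tool-result") console.log(part.result);
}
```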