ai 4.0.18 → 4.0.20

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
package/dist/index.mjs CHANGED
@@ -32,15 +32,19 @@ function createDataStream({
       controller = controllerArg;
     }
   });
+  function safeEnqueue(data) {
+    try {
+      controller.enqueue(data);
+    } catch (error) {
+    }
+  }
   try {
     const result = execute({
       writeData(data) {
-        controller.enqueue(formatDataStreamPart("data", [data]));
+        safeEnqueue(formatDataStreamPart("data", [data]));
       },
       writeMessageAnnotation(annotation) {
-        controller.enqueue(
-          formatDataStreamPart("message_annotations", [annotation])
-        );
+        safeEnqueue(formatDataStreamPart("message_annotations", [annotation]));
       },
       merge(streamArg) {
         ongoingStreamPromises.push(
@@ -50,10 +54,10 @@ function createDataStream({
               const { done, value } = await reader.read();
               if (done)
                 break;
-              controller.enqueue(value);
+              safeEnqueue(value);
             }
           })().catch((error) => {
-            controller.enqueue(formatDataStreamPart("error", onError(error)));
+            safeEnqueue(formatDataStreamPart("error", onError(error)));
           })
         );
       },
@@ -62,12 +66,12 @@ function createDataStream({
     if (result) {
       ongoingStreamPromises.push(
         result.catch((error) => {
-          controller.enqueue(formatDataStreamPart("error", onError(error)));
+          safeEnqueue(formatDataStreamPart("error", onError(error)));
         })
       );
     }
   } catch (error) {
-    controller.enqueue(formatDataStreamPart("error", onError(error)));
+    safeEnqueue(formatDataStreamPart("error", onError(error)));
   }
   const waitForStreams = new Promise(async (resolve) => {
     while (ongoingStreamPromises.length > 0) {
@@ -76,7 +80,10 @@ function createDataStream({
     resolve();
   });
   waitForStreams.finally(() => {
-    controller.close();
+    try {
+      controller.close();
+    } catch (error) {
+    }
   });
   return stream;
 }
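
Note: both hunks above guard controller operations that throw a TypeError once the ReadableStream has been cancelled or closed, so late writers no longer crash the producer. A minimal standalone sketch of the pattern (illustrative only; makeStream is a hypothetical name, not part of the package; assumes Node 18+ where ReadableStream is global):

    function makeStream() {
      let controller;
      const stream = new ReadableStream({
        start(c) {
          controller = c;
        }
      });
      function safeEnqueue(data) {
        try {
          controller.enqueue(data);
        } catch (error) {
          // stream was closed, cancelled, or errored; drop the chunk
        }
      }
      function safeClose() {
        try {
          controller.close();
        } catch (error) {
          // already closed
        }
      }
      return { stream, safeEnqueue, safeClose };
    }

    const { stream, safeEnqueue, safeClose } = makeStream();
    await stream.cancel(); // consumer goes away
    safeEnqueue("late chunk"); // previously: TypeError; now: silently dropped
    safeClose(); // previously: TypeError; now: no-op
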
@@ -1744,6 +1751,13 @@ function calculateLanguageModelUsage({
     totalTokens: promptTokens + completionTokens
   };
 }
+function addLanguageModelUsage(usage1, usage2) {
+  return {
+    promptTokens: usage1.promptTokens + usage2.promptTokens,
+    completionTokens: usage1.completionTokens + usage2.completionTokens,
+    totalTokens: usage1.totalTokens + usage2.totalTokens
+  };
+}
 
 // core/generate-object/inject-json-instruction.ts
 var DEFAULT_SCHEMA_PREFIX = "JSON schema:";
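
Note: addLanguageModelUsage centralizes the token arithmetic that was previously inlined at each accumulation site (see the generateText and streamText hunks below). A small sketch of folding per-step usage into a running total; the step values are made up for illustration:

    function addLanguageModelUsage(usage1, usage2) {
      return {
        promptTokens: usage1.promptTokens + usage2.promptTokens,
        completionTokens: usage1.completionTokens + usage2.completionTokens,
        totalTokens: usage1.totalTokens + usage2.totalTokens
      };
    }
    // Fold per-step usage into a running total, reassigning instead of mutating.
    let usage = { promptTokens: 0, completionTokens: 0, totalTokens: 0 };
    for (const stepUsage of [
      { promptTokens: 10, completionTokens: 20, totalTokens: 30 },
      { promptTokens: 5, completionTokens: 7, totalTokens: 12 }
    ]) {
      usage = addLanguageModelUsage(usage, stepUsage);
    }
    console.log(usage); // { promptTokens: 15, completionTokens: 27, totalTokens: 42 }

Reassigning the accumulator rather than mutating it is also why the const usage declaration in generateText becomes let usage in the next hunk.
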
@@ -3582,7 +3596,7 @@ async function generateText({
       const responseMessages = [];
       let text2 = "";
       const steps = [];
-      const usage = {
+      let usage = {
         completionTokens: 0,
         promptTokens: 0,
         totalTokens: 0
@@ -3709,9 +3723,7 @@ async function generateText({
         const currentUsage = calculateLanguageModelUsage(
           currentModelResponse.usage
         );
-        usage.completionTokens += currentUsage.completionTokens;
-        usage.promptTokens += currentUsage.promptTokens;
-        usage.totalTokens += currentUsage.totalTokens;
+        usage = addLanguageModelUsage(usage, currentUsage);
         let nextStepType = "done";
         if (++stepCount < maxSteps) {
           if (continueSteps && currentModelResponse.finishReason === "length" && // only use continue when there are no tool calls:
@@ -4122,25 +4134,6 @@ function runToolsTransformation({
           break;
         }
         case "tool-call": {
-          const toolName = chunk.toolName;
-          if (tools == null) {
-            toolResultsStreamController.enqueue({
-              type: "error",
-              error: new NoSuchToolError({ toolName: chunk.toolName })
-            });
-            break;
-          }
-          const tool2 = tools[toolName];
-          if (tool2 == null) {
-            toolResultsStreamController.enqueue({
-              type: "error",
-              error: new NoSuchToolError({
-                toolName: chunk.toolName,
-                availableTools: Object.keys(tools)
-              })
-            });
-            break;
-          }
           try {
             const toolCall = await parseToolCall({
               toolCall: chunk,
@@ -4150,6 +4143,7 @@ function runToolsTransformation({
               messages
             });
             controller.enqueue(toolCall);
+            const tool2 = tools[toolCall.toolName];
             if (tool2.execute != null) {
               const toolExecutionId = generateId();
               outstandingToolResults.add(toolExecutionId);
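
Note: the removed pre-checks appear redundant because parseToolCall already validates the tool name, so errors for unknown tools now surface through the catch around it; the tool is looked up only after the call parses successfully. A schematic sketch of the resulting flow, with the hypothetical parseCall standing in for the library's parseToolCall:

    function parseCall({ toolCall, tools }) {
      if (tools == null || tools[toolCall.toolName] == null) {
        // stands in for the package's NoSuchToolError
        throw new Error(`No such tool: ${toolCall.toolName}`);
      }
      return { toolName: toolCall.toolName, args: JSON.parse(toolCall.args) };
    }

    const tools = { weather: { execute: async ({ city }) => `sunny in ${city}` } };
    const toolCall = parseCall({
      toolCall: { toolName: "weather", args: '{"city":"Berlin"}' },
      tools
    });
    const tool2 = tools[toolCall.toolName]; // lookup after parsing, as in the hunk above
    console.log(await tool2.execute(toolCall.args)); // "sunny in Berlin"
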
@@ -4369,10 +4363,168 @@ var DefaultStreamTextResult = class {
         message: "maxSteps must be at least 1"
       });
     }
+    let recordedStepText = "";
+    let recordedContinuationText = "";
+    let recordedFullText = "";
+    let recordedRequest = void 0;
+    const recordedResponse = {
+      id: generateId3(),
+      timestamp: currentDate(),
+      modelId: model.modelId,
+      messages: []
+    };
+    let recordedToolCalls = [];
+    let recordedToolResults = [];
+    let recordedFinishReason = void 0;
+    let recordedUsage = void 0;
+    let recordedProviderMetadata = void 0;
+    let stepType = "initial";
+    const recordedSteps = [];
+    let rootSpan;
+    const eventProcessor = new TransformStream({
+      async transform(chunk, controller) {
+        controller.enqueue(chunk);
+        if (chunk.type === "text-delta" || chunk.type === "tool-call" || chunk.type === "tool-result" || chunk.type === "tool-call-streaming-start" || chunk.type === "tool-call-delta") {
+          await (onChunk == null ? void 0 : onChunk({ chunk }));
+        }
+        if (chunk.type === "text-delta") {
+          recordedStepText += chunk.textDelta;
+          recordedContinuationText += chunk.textDelta;
+          recordedFullText += chunk.textDelta;
+        }
+        if (chunk.type === "tool-call") {
+          recordedToolCalls.push(chunk);
+        }
+        if (chunk.type === "tool-result") {
+          recordedToolResults.push(chunk);
+        }
+        if (chunk.type === "step-finish") {
+          const stepMessages = toResponseMessages({
+            text: recordedContinuationText,
+            tools: tools != null ? tools : {},
+            toolCalls: recordedToolCalls,
+            toolResults: recordedToolResults
+          });
+          const currentStep = recordedSteps.length;
+          let nextStepType = "done";
+          if (currentStep + 1 < maxSteps) {
+            if (continueSteps && chunk.finishReason === "length" && // only use continue when there are no tool calls:
+            recordedToolCalls.length === 0) {
+              nextStepType = "continue";
+            } else if (
+              // there are tool calls:
+              recordedToolCalls.length > 0 && // all current tool calls have results:
+              recordedToolResults.length === recordedToolCalls.length
+            ) {
+              nextStepType = "tool-result";
+            }
+          }
+          const currentStepResult = {
+            stepType,
+            text: recordedStepText,
+            toolCalls: recordedToolCalls,
+            toolResults: recordedToolResults,
+            finishReason: chunk.finishReason,
+            usage: chunk.usage,
+            warnings: chunk.warnings,
+            logprobs: chunk.logprobs,
+            request: chunk.request,
+            response: {
+              ...chunk.response,
+              messages: [...recordedResponse.messages, ...stepMessages]
+            },
+            experimental_providerMetadata: chunk.experimental_providerMetadata,
+            isContinued: chunk.isContinued
+          };
+          await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
+          recordedSteps.push(currentStepResult);
+          recordedToolCalls = [];
+          recordedToolResults = [];
+          recordedStepText = "";
+          recordedRequest = chunk.request;
+          if (nextStepType !== "done") {
+            stepType = nextStepType;
+          }
+          if (nextStepType !== "continue") {
+            recordedResponse.messages.push(...stepMessages);
+            recordedContinuationText = "";
+          }
+        }
+        if (chunk.type === "finish") {
+          recordedResponse.id = chunk.response.id;
+          recordedResponse.timestamp = chunk.response.timestamp;
+          recordedResponse.modelId = chunk.response.modelId;
+          recordedResponse.headers = chunk.response.headers;
+          recordedUsage = chunk.usage;
+          recordedFinishReason = chunk.finishReason;
+          recordedProviderMetadata = chunk.experimental_providerMetadata;
+        }
+      },
+      async flush(controller) {
+        var _a13;
+        try {
+          const lastStep = recordedSteps[recordedSteps.length - 1];
+          if (lastStep) {
+            self.warningsPromise.resolve(lastStep.warnings);
+            self.requestPromise.resolve(lastStep.request);
+            self.responsePromise.resolve(lastStep.response);
+            self.toolCallsPromise.resolve(lastStep.toolCalls);
+            self.toolResultsPromise.resolve(lastStep.toolResults);
+            self.providerMetadataPromise.resolve(
+              lastStep.experimental_providerMetadata
+            );
+          }
+          const finishReason = recordedFinishReason != null ? recordedFinishReason : "unknown";
+          const usage = recordedUsage != null ? recordedUsage : {
+            completionTokens: NaN,
+            promptTokens: NaN,
+            totalTokens: NaN
+          };
+          self.finishReasonPromise.resolve(finishReason);
+          self.usagePromise.resolve(usage);
+          self.textPromise.resolve(recordedFullText);
+          self.stepsPromise.resolve(recordedSteps);
+          await (onFinish == null ? void 0 : onFinish({
+            finishReason,
+            logprobs: void 0,
+            usage,
+            text: recordedFullText,
+            toolCalls: lastStep.toolCalls,
+            toolResults: lastStep.toolResults,
+            request: (_a13 = lastStep.request) != null ? _a13 : {},
+            response: lastStep.response,
+            warnings: lastStep.warnings,
+            experimental_providerMetadata: lastStep.experimental_providerMetadata,
+            steps: recordedSteps
+          }));
+          rootSpan.setAttributes(
+            selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                "ai.response.finishReason": finishReason,
+                "ai.response.text": { output: () => recordedFullText },
+                "ai.response.toolCalls": {
+                  output: () => {
+                    var _a14;
+                    return ((_a14 = lastStep.toolCalls) == null ? void 0 : _a14.length) ? JSON.stringify(lastStep.toolCalls) : void 0;
+                  }
+                },
+                "ai.usage.promptTokens": usage.promptTokens,
+                "ai.usage.completionTokens": usage.completionTokens
+              }
+            })
+          );
+        } catch (error) {
+          controller.error(error);
+        } finally {
+          rootSpan.end();
+        }
+      }
+    });
     const stitchableStream = createStitchableStream();
     this.addStream = stitchableStream.addStream;
     this.closeStream = stitchableStream.close;
-    this.baseStream = transform ? stitchableStream.stream.pipeThrough(transform) : stitchableStream.stream;
+    this.baseStream = (transform ? stitchableStream.stream.pipeThrough(transform) : stitchableStream.stream).pipeThrough(eventProcessor);
     const { maxRetries, retry } = prepareRetries({
       maxRetries: maxRetriesArg
     });
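
Note: this addition moves onChunk/onStepFinish/onFinish dispatch and result recording into a single pass-through TransformStream (eventProcessor) piped onto the base stream, with the final promises settled in flush. A minimal runnable sketch of that pattern, using generic names rather than the library's internals (assumes Node 18+ globals):

    let resolveSummary;
    const summary = new Promise((resolve) => (resolveSummary = resolve));
    const recorded = [];
    const eventProcessor = new TransformStream({
      transform(chunk, controller) {
        controller.enqueue(chunk); // pass-through: consumers still see every chunk
        recorded.push(chunk); // side effect: record for later
      },
      flush() {
        resolveSummary(recorded); // settle once the source stream is done
      }
    });

    const source = new ReadableStream({
      start(controller) {
        controller.enqueue("a");
        controller.enqueue("b");
        controller.close();
      }
    });
    for await (const chunk of source.pipeThrough(eventProcessor)) {
      console.log("chunk:", chunk);
    }
    console.log("summary:", await summary); // [ 'a', 'b' ]
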
@@ -4404,13 +4556,13 @@ var DefaultStreamTextResult = class {
       }),
       tracer,
       endWhenDone: false,
-      fn: async (rootSpan) => {
-        const stepResults = [];
+      fn: async (rootSpanArg) => {
+        rootSpan = rootSpanArg;
         async function streamStep({
           currentStep,
           responseMessages,
           usage,
-          stepType,
+          stepType: stepType2,
           previousStepText,
           hasLeadingWhitespace
         }) {
@@ -4516,7 +4668,7 @@ var DefaultStreamTextResult = class {
           let stepProviderMetadata;
           let stepFirstChunk = true;
           let stepText = "";
-          let fullStepText = stepType === "continue" ? previousStepText : "";
+          let fullStepText = stepType2 === "continue" ? previousStepText : "";
           let stepLogProbs;
           let stepResponse = {
             id: generateId3(),
@@ -4536,7 +4688,6 @@ var DefaultStreamTextResult = class {
             fullStepText += chunk.textDelta;
             chunkTextPublished = true;
             hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
-            await (onChunk == null ? void 0 : onChunk({ chunk }));
           }
           self.addStream(
             transformedStream.pipeThrough(
@@ -4585,13 +4736,11 @@ var DefaultStreamTextResult = class {
                     case "tool-call": {
                       controller.enqueue(chunk);
                       stepToolCalls.push(chunk);
-                      await (onChunk == null ? void 0 : onChunk({ chunk }));
                       break;
                     }
                     case "tool-result": {
                       controller.enqueue(chunk);
                       stepToolResults.push(chunk);
-                      await (onChunk == null ? void 0 : onChunk({ chunk }));
                       break;
                     }
                     case "response-metadata": {
@@ -4618,7 +4767,6 @@ var DefaultStreamTextResult = class {
                     case "tool-call-streaming-start":
                     case "tool-call-delta": {
                       controller.enqueue(chunk);
-                      await (onChunk == null ? void 0 : onChunk({ chunk }));
                       break;
                     }
                     case "error": {
@@ -4649,7 +4797,7 @@ var DefaultStreamTextResult = class {
                   }
                 }
                 if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
                 stepType2 === "continue" && !chunkTextPublished)) {
                   await publishTextChunk({
                     controller,
                     chunk: {
@@ -4693,69 +4841,16 @@ var DefaultStreamTextResult = class {
                    usage: stepUsage,
                    experimental_providerMetadata: stepProviderMetadata,
                    logprobs: stepLogProbs,
-                   response: {
-                     ...stepResponse
-                   },
-                   isContinued: nextStepType === "continue"
-                 });
-                 if (stepType === "continue") {
-                   const lastMessage = responseMessages[responseMessages.length - 1];
-                   if (typeof lastMessage.content === "string") {
-                     lastMessage.content += stepText;
-                   } else {
-                     lastMessage.content.push({
-                       text: stepText,
-                       type: "text"
-                     });
-                   }
-                 } else {
-                   responseMessages.push(
-                     ...toResponseMessages({
-                       text: stepText,
-                       tools: tools != null ? tools : {},
-                       toolCalls: stepToolCalls,
-                       toolResults: stepToolResults
-                     })
-                   );
-                 }
-                 const currentStepResult = {
-                   stepType,
-                   text: stepText,
-                   toolCalls: stepToolCalls,
-                   toolResults: stepToolResults,
-                   finishReason: stepFinishReason,
-                   usage: stepUsage,
-                   warnings,
-                   logprobs: stepLogProbs,
                    request: stepRequest,
                    response: {
                      ...stepResponse,
-                     headers: rawResponse == null ? void 0 : rawResponse.headers,
-                     // deep clone msgs to avoid mutating past messages in multi-step:
-                     messages: JSON.parse(JSON.stringify(responseMessages))
+                     headers: rawResponse == null ? void 0 : rawResponse.headers
                    },
-                   experimental_providerMetadata: stepProviderMetadata,
+                   warnings,
                    isContinued: nextStepType === "continue"
-                 };
-                 stepResults.push(currentStepResult);
-                 await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
-                 const combinedUsage = {
-                   promptTokens: usage.promptTokens + stepUsage.promptTokens,
-                   completionTokens: usage.completionTokens + stepUsage.completionTokens,
-                   totalTokens: usage.totalTokens + stepUsage.totalTokens
-                 };
-                 if (nextStepType !== "done") {
-                   await streamStep({
-                     currentStep: currentStep + 1,
-                     responseMessages,
-                     usage: combinedUsage,
-                     stepType: nextStepType,
-                     previousStepText: fullStepText,
-                     hasLeadingWhitespace: hasWhitespaceSuffix
-                   });
-                   return;
-                 }
-                 try {
+                 });
+                 const combinedUsage = addLanguageModelUsage(usage, stepUsage);
+                 if (nextStepType === "done") {
                   controller.enqueue({
                     type: "finish",
                     finishReason: stepFinishReason,
@@ -4763,63 +4858,40 @@ var DefaultStreamTextResult = class {
                    experimental_providerMetadata: stepProviderMetadata,
                    logprobs: stepLogProbs,
                    response: {
-                     ...stepResponse
+                     ...stepResponse,
+                     headers: rawResponse == null ? void 0 : rawResponse.headers
                    }
                  });
                  self.closeStream();
-                 rootSpan.setAttributes(
-                   selectTelemetryAttributes({
-                     telemetry,
-                     attributes: {
-                       "ai.response.finishReason": stepFinishReason,
-                       "ai.response.text": { output: () => fullStepText },
-                       "ai.response.toolCalls": {
-                         output: () => stepToolCallsJson
-                       },
-                       "ai.usage.promptTokens": combinedUsage.promptTokens,
-                       "ai.usage.completionTokens": combinedUsage.completionTokens
-                     }
-                   })
-                 );
-                 self.usagePromise.resolve(combinedUsage);
-                 self.finishReasonPromise.resolve(stepFinishReason);
-                 self.textPromise.resolve(fullStepText);
-                 self.toolCallsPromise.resolve(stepToolCalls);
-                 self.providerMetadataPromise.resolve(stepProviderMetadata);
-                 self.toolResultsPromise.resolve(stepToolResults);
-                 self.requestPromise.resolve(stepRequest);
-                 self.responsePromise.resolve({
-                   ...stepResponse,
-                   headers: rawResponse == null ? void 0 : rawResponse.headers,
-                   messages: responseMessages
-                 });
-                 self.stepsPromise.resolve(stepResults);
-                 self.warningsPromise.resolve(warnings != null ? warnings : []);
-                 await (onFinish == null ? void 0 : onFinish({
-                   finishReason: stepFinishReason,
-                   logprobs: stepLogProbs,
+               } else {
+                 if (stepType2 === "continue") {
+                   const lastMessage = responseMessages[responseMessages.length - 1];
+                   if (typeof lastMessage.content === "string") {
+                     lastMessage.content += stepText;
+                   } else {
+                     lastMessage.content.push({
+                       text: stepText,
+                       type: "text"
+                     });
+                   }
+                 } else {
+                   responseMessages.push(
+                     ...toResponseMessages({
+                       text: stepText,
+                       tools: tools != null ? tools : {},
+                       toolCalls: stepToolCalls,
+                       toolResults: stepToolResults
+                     })
+                   );
+                 }
+                 await streamStep({
+                   currentStep: currentStep + 1,
+                   responseMessages,
                    usage: combinedUsage,
-                   text: fullStepText,
-                   toolCalls: stepToolCalls,
-                   // The tool results are inferred as a never[] type, because they are
-                   // optional and the execute method with an inferred result type is
-                   // optional as well. Therefore we need to cast the toolResults to any.
-                   // The type exposed to the users will be correctly inferred.
-                   toolResults: stepToolResults,
-                   request: stepRequest,
-                   response: {
-                     ...stepResponse,
-                     headers: rawResponse == null ? void 0 : rawResponse.headers,
-                     messages: responseMessages
-                   },
-                   warnings,
-                   experimental_providerMetadata: stepProviderMetadata,
-                   steps: stepResults
-                 }));
-               } catch (error) {
-                 controller.error(error);
-               } finally {
-                 rootSpan.end();
+                   stepType: nextStepType,
+                   previousStepText: fullStepText,
+                   hasLeadingWhitespace: hasWhitespaceSuffix
+                 });
                }
              }
            })
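
Note: with the per-step bookkeeping moved into the event processor, streamStep now only decides whether to recurse, carrying the combined usage forward. A schematic skeleton of that control flow (runStep and addUsage are hypothetical stand-ins, not the package's internals; the real code derives the next step type from finishReason and tool results rather than a bare step count):

    const addUsage = (a, b) => ({
      // mirrors addLanguageModelUsage from the hunk above
      promptTokens: a.promptTokens + b.promptTokens,
      completionTokens: a.completionTokens + b.completionTokens,
      totalTokens: a.totalTokens + b.totalTokens
    });
    async function runStep(step) {
      // stand-in for one model call; returns that step's token usage
      return { promptTokens: 10, completionTokens: 5, totalTokens: 15 };
    }
    async function streamStep({ currentStep, usage, maxSteps }) {
      const stepUsage = await runStep(currentStep);
      const combinedUsage = addUsage(usage, stepUsage);
      if (currentStep + 1 >= maxSteps) {
        return combinedUsage; // "done": the event processor's flush fires onFinish
      }
      return streamStep({
        currentStep: currentStep + 1,
        usage: combinedUsage,
        maxSteps
      });
    }
    console.log(await streamStep({
      currentStep: 0,
      usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
      maxSteps: 3
    })); // { promptTokens: 30, completionTokens: 15, totalTokens: 45 }
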