ai 4.0.18 → 4.0.19

This diff compares publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,11 @@
 # ai
 
+## 4.0.19
+
+### Patch Changes
+
+- c3a6065: fix (ai/core): apply transform before callbacks and resolvables
+
 ## 4.0.18
 
 ### Patch Changes
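
The changelog entry's user-visible effect, sketched below: when `experimental_transform` is set, the `onChunk`/`onStepFinish`/`onFinish` callbacks and the awaited resolvables (`text`, `usage`, `steps`, ...) now observe the transformed stream, because the recording stream is piped downstream of the user transform (see the `dist/index.js` hunks below). A minimal sketch; the provider, model, and `smoothStream` transform are illustrative assumptions, not part of this diff:

```ts
import { smoothStream, streamText } from "ai";
import { openai } from "@ai-sdk/openai"; // assumed provider; any model works

const result = streamText({
  model: openai("gpt-4o-mini"),
  prompt: "Write a haiku about diffs.",
  experimental_transform: smoothStream(), // applied before callbacks as of 4.0.19
  onChunk({ chunk }) {
    // Receives transformed chunks, not the raw provider chunks.
    if (chunk.type === "text-delta") process.stdout.write(chunk.textDelta);
  },
});

console.log(await result.text); // the resolvable reflects the transform too
```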
package/dist/index.d.mts CHANGED
@@ -1978,7 +1978,9 @@ type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
     finishReason: FinishReason;
     logprobs?: LogProbs;
     usage: LanguageModelUsage;
+    request: LanguageModelRequestMetadata;
     response: LanguageModelResponseMetadata;
+    warnings: CallWarning[] | undefined;
     experimental_providerMetadata?: ProviderMetadata;
     isContinued: boolean;
 } | {
package/dist/index.d.ts CHANGED
@@ -1978,7 +1978,9 @@ type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
     finishReason: FinishReason;
     logprobs?: LogProbs;
     usage: LanguageModelUsage;
+    request: LanguageModelRequestMetadata;
     response: LanguageModelResponseMetadata;
+    warnings: CallWarning[] | undefined;
     experimental_providerMetadata?: ProviderMetadata;
     isContinued: boolean;
 } | {
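
Both declaration files gain the same two fields on the `step-finish` variant of `TextStreamPart`: the request metadata and any call warnings for that step. A consumption sketch, assuming a `result` from `streamText` as above:

```ts
for await (const part of result.fullStream) {
  if (part.type === "step-finish") {
    console.log(part.request);  // LanguageModelRequestMetadata for the step
    console.log(part.warnings); // CallWarning[] | undefined
  }
}
```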
package/dist/index.js CHANGED
@@ -1804,6 +1804,13 @@ function calculateLanguageModelUsage({
     totalTokens: promptTokens + completionTokens
   };
 }
+function addLanguageModelUsage(usage1, usage2) {
+  return {
+    promptTokens: usage1.promptTokens + usage2.promptTokens,
+    completionTokens: usage1.completionTokens + usage2.completionTokens,
+    totalTokens: usage1.totalTokens + usage2.totalTokens
+  };
+}
 
 // core/generate-object/inject-json-instruction.ts
 var DEFAULT_SCHEMA_PREFIX = "JSON schema:";
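
The new `addLanguageModelUsage` helper centralizes the token arithmetic that the hunks below previously spelled out inline in `generateText` and `streamText`. The same arithmetic restated as a standalone sketch (the `Usage` type and `addUsage` name are illustrative):

```ts
type Usage = { promptTokens: number; completionTokens: number; totalTokens: number };

const addUsage = (a: Usage, b: Usage): Usage => ({
  promptTokens: a.promptTokens + b.promptTokens,
  completionTokens: a.completionTokens + b.completionTokens,
  totalTokens: a.totalTokens + b.totalTokens,
});

const steps: Usage[] = [
  { promptTokens: 10, completionTokens: 5, totalTokens: 15 },
  { promptTokens: 20, completionTokens: 7, totalTokens: 27 },
];
steps.reduce(addUsage); // { promptTokens: 30, completionTokens: 12, totalTokens: 42 }
```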
@@ -3622,7 +3629,7 @@ async function generateText({
       const responseMessages = [];
       let text2 = "";
       const steps = [];
-      const usage = {
+      let usage = {
         completionTokens: 0,
         promptTokens: 0,
         totalTokens: 0
@@ -3749,9 +3756,7 @@ async function generateText({
         const currentUsage = calculateLanguageModelUsage(
           currentModelResponse.usage
         );
-        usage.completionTokens += currentUsage.completionTokens;
-        usage.promptTokens += currentUsage.promptTokens;
-        usage.totalTokens += currentUsage.totalTokens;
+        usage = addLanguageModelUsage(usage, currentUsage);
         let nextStepType = "done";
         if (++stepCount < maxSteps) {
           if (continueSteps && currentModelResponse.finishReason === "length" && // only use continue when there are no tool calls:
@@ -4409,10 +4414,168 @@ var DefaultStreamTextResult = class {
         message: "maxSteps must be at least 1"
       });
     }
+    let recordedStepText = "";
+    let recordedContinuationText = "";
+    let recordedFullText = "";
+    let recordedRequest = void 0;
+    const recordedResponse = {
+      id: generateId3(),
+      timestamp: currentDate(),
+      modelId: model.modelId,
+      messages: []
+    };
+    let recordedToolCalls = [];
+    let recordedToolResults = [];
+    let recordedFinishReason = void 0;
+    let recordedUsage = void 0;
+    let recordedProviderMetadata = void 0;
+    let stepType = "initial";
+    const recordedSteps = [];
+    let rootSpan;
+    const eventProcessor = new TransformStream({
+      async transform(chunk, controller) {
+        controller.enqueue(chunk);
+        if (chunk.type === "text-delta" || chunk.type === "tool-call" || chunk.type === "tool-result" || chunk.type === "tool-call-streaming-start" || chunk.type === "tool-call-delta") {
+          await (onChunk == null ? void 0 : onChunk({ chunk }));
+        }
+        if (chunk.type === "text-delta") {
+          recordedStepText += chunk.textDelta;
+          recordedContinuationText += chunk.textDelta;
+          recordedFullText += chunk.textDelta;
+        }
+        if (chunk.type === "tool-call") {
+          recordedToolCalls.push(chunk);
+        }
+        if (chunk.type === "tool-result") {
+          recordedToolResults.push(chunk);
+        }
+        if (chunk.type === "step-finish") {
+          const stepMessages = toResponseMessages({
+            text: recordedContinuationText,
+            tools: tools != null ? tools : {},
+            toolCalls: recordedToolCalls,
+            toolResults: recordedToolResults
+          });
+          const currentStep = recordedSteps.length;
+          let nextStepType = "done";
+          if (currentStep + 1 < maxSteps) {
+            if (continueSteps && chunk.finishReason === "length" && // only use continue when there are no tool calls:
+            recordedToolCalls.length === 0) {
+              nextStepType = "continue";
+            } else if (
+              // there are tool calls:
+              recordedToolCalls.length > 0 && // all current tool calls have results:
+              recordedToolResults.length === recordedToolCalls.length
+            ) {
+              nextStepType = "tool-result";
+            }
+          }
+          const currentStepResult = {
+            stepType,
+            text: recordedStepText,
+            toolCalls: recordedToolCalls,
+            toolResults: recordedToolResults,
+            finishReason: chunk.finishReason,
+            usage: chunk.usage,
+            warnings: chunk.warnings,
+            logprobs: chunk.logprobs,
+            request: chunk.request,
+            response: {
+              ...chunk.response,
+              messages: [...recordedResponse.messages, ...stepMessages]
+            },
+            experimental_providerMetadata: chunk.experimental_providerMetadata,
+            isContinued: chunk.isContinued
+          };
+          await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
+          recordedSteps.push(currentStepResult);
+          recordedToolCalls = [];
+          recordedToolResults = [];
+          recordedStepText = "";
+          recordedRequest = chunk.request;
+          if (nextStepType !== "done") {
+            stepType = nextStepType;
+          }
+          if (nextStepType !== "continue") {
+            recordedResponse.messages.push(...stepMessages);
+            recordedContinuationText = "";
+          }
+        }
+        if (chunk.type === "finish") {
+          recordedResponse.id = chunk.response.id;
+          recordedResponse.timestamp = chunk.response.timestamp;
+          recordedResponse.modelId = chunk.response.modelId;
+          recordedResponse.headers = chunk.response.headers;
+          recordedUsage = chunk.usage;
+          recordedFinishReason = chunk.finishReason;
+          recordedProviderMetadata = chunk.experimental_providerMetadata;
+        }
+      },
+      async flush(controller) {
+        var _a13;
+        try {
+          const lastStep = recordedSteps[recordedSteps.length - 1];
+          if (lastStep) {
+            self.warningsPromise.resolve(lastStep.warnings);
+            self.requestPromise.resolve(lastStep.request);
+            self.responsePromise.resolve(lastStep.response);
+            self.toolCallsPromise.resolve(lastStep.toolCalls);
+            self.toolResultsPromise.resolve(lastStep.toolResults);
+            self.providerMetadataPromise.resolve(
+              lastStep.experimental_providerMetadata
+            );
+          }
+          const finishReason = recordedFinishReason != null ? recordedFinishReason : "unknown";
+          const usage = recordedUsage != null ? recordedUsage : {
+            completionTokens: NaN,
+            promptTokens: NaN,
+            totalTokens: NaN
+          };
+          self.finishReasonPromise.resolve(finishReason);
+          self.usagePromise.resolve(usage);
+          self.textPromise.resolve(recordedFullText);
+          self.stepsPromise.resolve(recordedSteps);
+          await (onFinish == null ? void 0 : onFinish({
+            finishReason,
+            logprobs: void 0,
+            usage,
+            text: recordedFullText,
+            toolCalls: lastStep.toolCalls,
+            toolResults: lastStep.toolResults,
+            request: (_a13 = lastStep.request) != null ? _a13 : {},
+            response: lastStep.response,
+            warnings: lastStep.warnings,
+            experimental_providerMetadata: lastStep.experimental_providerMetadata,
+            steps: recordedSteps
+          }));
+          rootSpan.setAttributes(
+            selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                "ai.response.finishReason": finishReason,
+                "ai.response.text": { output: () => recordedFullText },
+                "ai.response.toolCalls": {
+                  output: () => {
+                    var _a14;
+                    return ((_a14 = lastStep.toolCalls) == null ? void 0 : _a14.length) ? JSON.stringify(lastStep.toolCalls) : void 0;
+                  }
+                },
+                "ai.usage.promptTokens": usage.promptTokens,
+                "ai.usage.completionTokens": usage.completionTokens
+              }
+            })
+          );
+        } catch (error) {
+          controller.error(error);
+        } finally {
+          rootSpan.end();
+        }
+      }
+    });
     const stitchableStream = createStitchableStream();
     this.addStream = stitchableStream.addStream;
     this.closeStream = stitchableStream.close;
-    this.baseStream = transform ? stitchableStream.stream.pipeThrough(transform) : stitchableStream.stream;
+    this.baseStream = (transform ? stitchableStream.stream.pipeThrough(transform) : stitchableStream.stream).pipeThrough(eventProcessor);
     const { maxRetries, retry } = prepareRetries({
       maxRetries: maxRetriesArg
     });
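
This hunk is the core of the fix: recording, `onChunk`/`onStepFinish`/`onFinish` invocation, and promise resolution all move into a single pass-through `eventProcessor` that is piped after the optional user `transform`, so every consumer sees post-transform chunks. The pattern in isolation, as a sketch with illustrative names:

```ts
// Forward every chunk unchanged while recording it; settle state in flush()
// once the upstream (already transformed) stream has ended.
function recordingStream<T>(onDone: (seen: T[]) => void): TransformStream<T, T> {
  const seen: T[] = [];
  return new TransformStream<T, T>({
    transform(chunk, controller) {
      controller.enqueue(chunk); // pass through unchanged
      seen.push(chunk);          // observe after any upstream transform
    },
    flush() {
      onDone(seen);
    },
  });
}

// usage: source.pipeThrough(userTransform).pipeThrough(recordingStream(resolveAll))
```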
@@ -4444,13 +4607,13 @@ var DefaultStreamTextResult = class {
       }),
       tracer,
       endWhenDone: false,
-      fn: async (rootSpan) => {
-        const stepResults = [];
+      fn: async (rootSpanArg) => {
+        rootSpan = rootSpanArg;
         async function streamStep({
           currentStep,
           responseMessages,
           usage,
-          stepType,
+          stepType: stepType2,
           previousStepText,
           hasLeadingWhitespace
         }) {
@@ -4556,7 +4719,7 @@ var DefaultStreamTextResult = class {
           let stepProviderMetadata;
           let stepFirstChunk = true;
           let stepText = "";
-          let fullStepText = stepType === "continue" ? previousStepText : "";
+          let fullStepText = stepType2 === "continue" ? previousStepText : "";
           let stepLogProbs;
           let stepResponse = {
             id: generateId3(),
@@ -4576,7 +4739,6 @@ var DefaultStreamTextResult = class {
             fullStepText += chunk.textDelta;
             chunkTextPublished = true;
             hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
-            await (onChunk == null ? void 0 : onChunk({ chunk }));
           }
           self.addStream(
             transformedStream.pipeThrough(
@@ -4625,13 +4787,11 @@ var DefaultStreamTextResult = class {
                     case "tool-call": {
                       controller.enqueue(chunk);
                       stepToolCalls.push(chunk);
-                      await (onChunk == null ? void 0 : onChunk({ chunk }));
                       break;
                     }
                     case "tool-result": {
                       controller.enqueue(chunk);
                       stepToolResults.push(chunk);
-                      await (onChunk == null ? void 0 : onChunk({ chunk }));
                       break;
                     }
                     case "response-metadata": {
@@ -4658,7 +4818,6 @@ var DefaultStreamTextResult = class {
                     case "tool-call-streaming-start":
                     case "tool-call-delta": {
                       controller.enqueue(chunk);
-                      await (onChunk == null ? void 0 : onChunk({ chunk }));
                       break;
                     }
                     case "error": {
@@ -4689,7 +4848,7 @@ var DefaultStreamTextResult = class {
                     }
                   }
                   if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
-                  stepType === "continue" && !chunkTextPublished)) {
+                  stepType2 === "continue" && !chunkTextPublished)) {
                     await publishTextChunk({
                       controller,
                       chunk: {
@@ -4733,69 +4892,16 @@ var DefaultStreamTextResult = class {
                     usage: stepUsage,
                     experimental_providerMetadata: stepProviderMetadata,
                     logprobs: stepLogProbs,
-                    response: {
-                      ...stepResponse
-                    },
-                    isContinued: nextStepType === "continue"
-                  });
-                  if (stepType === "continue") {
-                    const lastMessage = responseMessages[responseMessages.length - 1];
-                    if (typeof lastMessage.content === "string") {
-                      lastMessage.content += stepText;
-                    } else {
-                      lastMessage.content.push({
-                        text: stepText,
-                        type: "text"
-                      });
-                    }
-                  } else {
-                    responseMessages.push(
-                      ...toResponseMessages({
-                        text: stepText,
-                        tools: tools != null ? tools : {},
-                        toolCalls: stepToolCalls,
-                        toolResults: stepToolResults
-                      })
-                    );
-                  }
-                  const currentStepResult = {
-                    stepType,
-                    text: stepText,
-                    toolCalls: stepToolCalls,
-                    toolResults: stepToolResults,
-                    finishReason: stepFinishReason,
-                    usage: stepUsage,
-                    warnings,
-                    logprobs: stepLogProbs,
                     request: stepRequest,
                     response: {
                       ...stepResponse,
-                      headers: rawResponse == null ? void 0 : rawResponse.headers,
-                      // deep clone msgs to avoid mutating past messages in multi-step:
-                      messages: JSON.parse(JSON.stringify(responseMessages))
+                      headers: rawResponse == null ? void 0 : rawResponse.headers
                     },
-                    experimental_providerMetadata: stepProviderMetadata,
+                    warnings,
                     isContinued: nextStepType === "continue"
-                  };
-                  stepResults.push(currentStepResult);
-                  await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
-                  const combinedUsage = {
-                    promptTokens: usage.promptTokens + stepUsage.promptTokens,
-                    completionTokens: usage.completionTokens + stepUsage.completionTokens,
-                    totalTokens: usage.totalTokens + stepUsage.totalTokens
-                  };
-                  if (nextStepType !== "done") {
-                    await streamStep({
-                      currentStep: currentStep + 1,
-                      responseMessages,
-                      usage: combinedUsage,
-                      stepType: nextStepType,
-                      previousStepText: fullStepText,
-                      hasLeadingWhitespace: hasWhitespaceSuffix
-                    });
-                    return;
-                  }
-                  try {
+                  });
+                  const combinedUsage = addLanguageModelUsage(usage, stepUsage);
+                  if (nextStepType === "done") {
                     controller.enqueue({
                       type: "finish",
                       finishReason: stepFinishReason,
@@ -4803,63 +4909,40 @@ var DefaultStreamTextResult = class {
                       experimental_providerMetadata: stepProviderMetadata,
                       logprobs: stepLogProbs,
                       response: {
-                        ...stepResponse
+                        ...stepResponse,
+                        headers: rawResponse == null ? void 0 : rawResponse.headers
                       }
                     });
                     self.closeStream();
-                    rootSpan.setAttributes(
-                      selectTelemetryAttributes({
-                        telemetry,
-                        attributes: {
-                          "ai.response.finishReason": stepFinishReason,
-                          "ai.response.text": { output: () => fullStepText },
-                          "ai.response.toolCalls": {
-                            output: () => stepToolCallsJson
-                          },
-                          "ai.usage.promptTokens": combinedUsage.promptTokens,
-                          "ai.usage.completionTokens": combinedUsage.completionTokens
-                        }
-                      })
-                    );
-                    self.usagePromise.resolve(combinedUsage);
-                    self.finishReasonPromise.resolve(stepFinishReason);
-                    self.textPromise.resolve(fullStepText);
-                    self.toolCallsPromise.resolve(stepToolCalls);
-                    self.providerMetadataPromise.resolve(stepProviderMetadata);
-                    self.toolResultsPromise.resolve(stepToolResults);
-                    self.requestPromise.resolve(stepRequest);
-                    self.responsePromise.resolve({
-                      ...stepResponse,
-                      headers: rawResponse == null ? void 0 : rawResponse.headers,
-                      messages: responseMessages
-                    });
-                    self.stepsPromise.resolve(stepResults);
-                    self.warningsPromise.resolve(warnings != null ? warnings : []);
-                    await (onFinish == null ? void 0 : onFinish({
-                      finishReason: stepFinishReason,
-                      logprobs: stepLogProbs,
+                  } else {
+                    if (stepType2 === "continue") {
+                      const lastMessage = responseMessages[responseMessages.length - 1];
+                      if (typeof lastMessage.content === "string") {
+                        lastMessage.content += stepText;
+                      } else {
+                        lastMessage.content.push({
+                          text: stepText,
+                          type: "text"
+                        });
+                      }
+                    } else {
+                      responseMessages.push(
+                        ...toResponseMessages({
+                          text: stepText,
+                          tools: tools != null ? tools : {},
+                          toolCalls: stepToolCalls,
+                          toolResults: stepToolResults
+                        })
+                      );
+                    }
+                    await streamStep({
+                      currentStep: currentStep + 1,
+                      responseMessages,
                       usage: combinedUsage,
-                      text: fullStepText,
-                      toolCalls: stepToolCalls,
-                      // The tool results are inferred as a never[] type, because they are
-                      // optional and the execute method with an inferred result type is
-                      // optional as well. Therefore we need to cast the toolResults to any.
-                      // The type exposed to the users will be correctly inferred.
-                      toolResults: stepToolResults,
-                      request: stepRequest,
-                      response: {
-                        ...stepResponse,
-                        headers: rawResponse == null ? void 0 : rawResponse.headers,
-                        messages: responseMessages
-                      },
-                      warnings,
-                      experimental_providerMetadata: stepProviderMetadata,
-                      steps: stepResults
-                    }));
-                  } catch (error) {
-                    controller.error(error);
-                  } finally {
-                    rootSpan.end();
+                      stepType: nextStepType,
+                      previousStepText: fullStepText,
+                      hasLeadingWhitespace: hasWhitespaceSuffix
+                    });
                   }
                 }
               })