ai 3.3.36 → 3.3.38

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -1112,8 +1112,8 @@ function prepareCallSettings({
     if (typeof topK !== "number") {
       throw new InvalidArgumentError({
         parameter: "topK",
-        value: topP,
-        message: "topP must be a number"
+        value: topK,
+        message: "topK must be a number"
       });
     }
   }
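Note: this hunk fixes a copy-paste bug in topK validation, which previously reported topP's value and message. A minimal sketch of the corrected behavior, assuming a call that reaches prepareCallSettings via the public API (model and prompt below are placeholders):

    import { generateText } from "ai";

    // Invalid on purpose: topK must be a number. Before this fix the
    // thrown InvalidArgumentError said "topP must be a number"; it now
    // correctly names topK as the offending parameter.
    await generateText({
      model: myModel,       // placeholder for any configured model
      prompt: "Hello",
      topK: "high",         // wrong type, triggers the validation above
    });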
@@ -1768,7 +1768,7 @@ function validateObjectGenerationInput({
 }
 
 // core/generate-object/generate-object.ts
-var originalGenerateId = createIdGenerator({ prefix: "aiobj-", length: 24 });
+var originalGenerateId = createIdGenerator({ prefix: "aiobj-", size: 24 });
 async function generateObject({
   model,
   enum: enumValues,
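The same option rename (length → size) is applied to the createIdGenerator call sites in stream-object.ts, generate-text.ts, and stream-text.ts in the three hunks below. A minimal sketch of the renamed option, assuming createIdGenerator is imported from @ai-sdk/provider-utils as in this bundle:

    import { createIdGenerator } from "@ai-sdk/provider-utils";

    // size controls the length of the random part of the id;
    // generated ids look like "aiobj-" followed by 24 random characters.
    const generateId = createIdGenerator({ prefix: "aiobj-", size: 24 });
    console.log(generateId()); // e.g. "aiobj-Kq3x..." (illustrative)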
@@ -2257,7 +2257,7 @@ function writeToServerResponse({
 }
 
 // core/generate-object/stream-object.ts
-var originalGenerateId2 = createIdGenerator2({ prefix: "aiobj-", length: 24 });
+var originalGenerateId2 = createIdGenerator2({ prefix: "aiobj-", size: 24 });
 async function streamObject({
   model,
   schema: inputSchema,
@@ -2978,7 +2978,7 @@ function parseToolCall({
 }
 
 // core/generate-text/generate-text.ts
-var originalGenerateId3 = createIdGenerator3({ prefix: "aitxt-", length: 24 });
+var originalGenerateId3 = createIdGenerator3({ prefix: "aitxt-", size: 24 });
 async function generateText({
   model,
   tools,
@@ -3658,7 +3658,7 @@ function runToolsTransformation({
 }
 
 // core/generate-text/stream-text.ts
-var originalGenerateId4 = createIdGenerator4({ prefix: "aitxt-", length: 24 });
+var originalGenerateId4 = createIdGenerator4({ prefix: "aitxt-", size: 24 });
 async function streamText({
   model,
   tools,
@@ -3676,6 +3676,7 @@ async function streamText({
   experimental_toolCallStreaming: toolCallStreaming = false,
   onChunk,
   onFinish,
+  onStepFinish,
   _internal: {
     now: now2 = now,
     generateId: generateId3 = originalGenerateId4,
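This and the next two hunks thread a new onStepFinish callback from the streamText options through to DefaultStreamTextResult. A minimal usage sketch, assuming the step result shape constructed later in this diff (text, toolCalls, toolResults, finishReason, usage, warnings, logprobs, response, rawResponse):

    import { streamText } from "ai";

    const result = await streamText({
      model: myModel,      // placeholder for any configured model
      prompt: "What is the weather in Berlin?",
      onStepFinish(step) {
        // invoked once per finished step, before the final onFinish
        console.log(step.finishReason, step.usage, step.toolCalls);
      },
    });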
@@ -3808,6 +3809,7 @@ async function streamText({
     rawResponse,
     onChunk,
     onFinish,
+    onStepFinish,
     rootSpan,
     doStreamSpan,
     telemetry,
@@ -3830,6 +3832,7 @@ var DefaultStreamTextResult = class {
     rawResponse,
     onChunk,
     onFinish,
+    onStepFinish,
     rootSpan,
     doStreamSpan,
     telemetry,
@@ -3982,26 +3985,7 @@ var DefaultStreamTextResult = class {
         },
         // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
         async flush(controller) {
-          controller.enqueue({
-            type: "step-finish",
-            finishReason: stepFinishReason,
-            usage: stepUsage,
-            experimental_providerMetadata: stepProviderMetadata,
-            logprobs: stepLogProbs,
-            response: stepResponse
-          });
-          stepResults.push({
-            text: stepText,
-            toolCalls: stepToolCalls,
-            toolResults: stepToolResults,
-            finishReason: stepFinishReason,
-            usage: stepUsage,
-            warnings: self.warnings,
-            logprobs: stepLogProbs,
-            response: stepResponse,
-            rawResponse: self.rawResponse
-          });
-          const telemetryToolCalls = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
+          const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
           try {
             doStreamSpan2.setAttributes(
               selectTelemetryAttributes({
@@ -4010,7 +3994,7 @@ var DefaultStreamTextResult = class {
                   "ai.response.finishReason": stepFinishReason,
                   "ai.response.text": { output: () => stepText },
                   "ai.response.toolCalls": {
-                    output: () => telemetryToolCalls
+                    output: () => stepToolCallsJson
                   },
                   "ai.response.id": stepResponse.id,
                   "ai.response.model": stepResponse.modelId,
@@ -4021,7 +4005,7 @@ var DefaultStreamTextResult = class {
                   "ai.finishReason": stepFinishReason,
                   "ai.result.text": { output: () => stepText },
                   "ai.result.toolCalls": {
-                    output: () => telemetryToolCalls
+                    output: () => stepToolCallsJson
                   },
                   // standardized gen-ai llm span attributes:
                   "gen_ai.response.finish_reasons": [stepFinishReason],
@@ -4036,6 +4020,27 @@ var DefaultStreamTextResult = class {
           } finally {
             doStreamSpan2.end();
           }
+          controller.enqueue({
+            type: "step-finish",
+            finishReason: stepFinishReason,
+            usage: stepUsage,
+            experimental_providerMetadata: stepProviderMetadata,
+            logprobs: stepLogProbs,
+            response: stepResponse
+          });
+          const stepResult = {
+            text: stepText,
+            toolCalls: stepToolCalls,
+            toolResults: stepToolResults,
+            finishReason: stepFinishReason,
+            usage: stepUsage,
+            warnings: self.warnings,
+            logprobs: stepLogProbs,
+            response: stepResponse,
+            rawResponse: self.rawResponse
+          };
+          stepResults.push(stepResult);
+          await (onStepFinish == null ? void 0 : onStepFinish(stepResult));
           const combinedUsage = {
             promptTokens: usage.promptTokens + stepUsage.promptTokens,
             completionTokens: usage.completionTokens + stepUsage.completionTokens,
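Two things change in this hunk: the step-finish event and the step result bookkeeping now run after the telemetry span is closed, and the new onStepFinish callback is awaited with the recorded step result. The compiled guard `onStepFinish == null ? void 0 : onStepFinish(stepResult)` is the down-leveled form of an optional call; in source it would read roughly:

    // sketch of the source form of the compiled expression above
    const stepResult = { text: stepText, /* ...remaining step fields... */ };
    stepResults.push(stepResult);
    await onStepFinish?.(stepResult);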
@@ -4093,7 +4098,7 @@ var DefaultStreamTextResult = class {
                 "ai.response.finishReason": stepFinishReason,
                 "ai.response.text": { output: () => stepText },
                 "ai.response.toolCalls": {
-                  output: () => telemetryToolCalls
+                  output: () => stepToolCallsJson
                 },
                 "ai.usage.promptTokens": combinedUsage.promptTokens,
                 "ai.usage.completionTokens": combinedUsage.completionTokens,
@@ -4101,7 +4106,7 @@ var DefaultStreamTextResult = class {
                 "ai.finishReason": stepFinishReason,
                 "ai.result.text": { output: () => stepText },
                 "ai.result.toolCalls": {
-                  output: () => telemetryToolCalls
+                  output: () => stepToolCallsJson
                 }
               }
             })
@@ -4119,6 +4124,7 @@ var DefaultStreamTextResult = class {
           resolveSteps(stepResults);
           await (onFinish == null ? void 0 : onFinish({
             finishReason: stepFinishReason,
+            logprobs: stepLogProbs,
             usage: combinedUsage,
             text: stepText,
             toolCalls: stepToolCalls,
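The final hunk adds the last step's logprobs to the onFinish payload. A minimal sketch of reading it, assuming the provider actually returns logprobs (many do not unless configured to):

    const result = await streamText({
      model: myModel,      // placeholder for any configured model
      prompt: "Hi",
      onFinish({ finishReason, logprobs, usage }) {
        // logprobs stays undefined unless the provider supplies them
        console.log(finishReason, usage, logprobs);
      },
    });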