ai 3.3.37 → 3.3.38

package/CHANGELOG.md CHANGED
@@ -1,5 +1,11 @@
  # ai

+ ## 3.3.38
+
+ ### Patch Changes
+
+ - 83da52c: feat (ai/core): add onStepFinish callback to streamText
+
  ## 3.3.37

  ### Patch Changes
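
For context on the changelog entry above: `onStepFinish` fires once per step (LLM call) within a single `streamText` invocation, including intermediate tool-call roundtrips. A minimal usage sketch; the `@ai-sdk/openai` provider and model name are stand-ins, any provider model works:

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider for illustration

const result = await streamText({
  model: openai('gpt-4o'), // hypothetical model choice
  prompt: 'Write a haiku about version bumps.',
  // New in 3.3.38: invoked after each step (LLM call) finishes,
  // with the same StepResult shape that onFinish aggregates in `steps`.
  onStepFinish(step) {
    console.log('step done:', step.finishReason, step.usage);
  },
});

// Callbacks only fire once the stream is consumed.
for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}
```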
package/dist/index.d.mts CHANGED
@@ -1806,7 +1806,7 @@ If set and supported by the model, calls will generate deterministic results.
  @return
  A result object for accessing different stream types and additional information.
  */
- declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+ declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1868,57 +1868,22 @@ Callback that is called for each chunk of the stream. The stream processing will
  Callback that is called when the LLM response and all request tool executions
  (for tools that have an `execute` function) are finished.
  */
- onFinish?: (event: {
+ onFinish?: (event: StepResult<TOOLS> & {
  /**
- The reason why the generation finished.
- */
- finishReason: FinishReason;
- /**
- The token usage of the generated response.
- */
- usage: LanguageModelUsage$1;
- /**
- The full text that has been generated.
- */
- text: string;
- /**
- The tool calls that have been executed.
- */
- toolCalls?: ToToolCall<TOOLS>[];
- /**
- The tool results that have been generated.
- */
- toolResults?: ToToolResult<TOOLS>[];
- /**
- Optional raw response data.
-
- @deprecated Use `response` instead.
- */
- rawResponse?: {
- /**
- Response headers.
- */
- headers?: Record<string, string>;
- };
- /**
- Response metadata.
- */
- response: LanguageModelResponseMetadataWithHeaders;
- /**
- Details for all steps.
- */
+ Details for all steps.
+ */
  steps: StepResult<TOOLS>[];
  /**
- Warnings from the model provider (e.g. unsupported settings).
- */
- warnings?: CallWarning[];
- /**
- Additional provider-specific metadata. They are passed through
- from the provider to the AI SDK and enable provider-specific
- results that can be fully encapsulated in the provider.
- */
+ Additional provider-specific metadata. They are passed through
+ from the provider to the AI SDK and enable provider-specific
+ results that can be fully encapsulated in the provider.
+ */
  readonly experimental_providerMetadata: ProviderMetadata | undefined;
  }) => Promise<void> | void;
+ /**
+ Callback that is called when each step (LLM call) is finished, including intermediate steps.
+ */
+ onStepFinish?: (event: StepResult<TOOLS>) => Promise<void> | void;
  /**
  * Internal. For test use only. May change without notice.
  */
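
The `onFinish` change above is type-level only: the event now extends `StepResult<TOOLS>` instead of redeclaring `finishReason`, `usage`, `text`, and the other per-step fields inline, so existing handlers keep working unchanged. A hedged sketch of a handler reading both the inherited fields and the `steps` array (provider and model are again stand-ins):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider for illustration

const result = await streamText({
  model: openai('gpt-4o'), // hypothetical model choice
  prompt: 'Summarize this changelog entry.',
  onFinish(event) {
    // Inherited from StepResult<TOOLS> (same fields as before the refactor):
    console.log(event.finishReason, event.usage.promptTokens, event.text);
    // Declared on the intersection type itself:
    console.log(`completed in ${event.steps.length} step(s)`);
  },
});

for await (const _ of result.textStream) {
  // drain the stream so onFinish runs
}
```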
package/dist/index.d.ts CHANGED
@@ -1806,7 +1806,7 @@ If set and supported by the model, calls will generate deterministic results.
  @return
  A result object for accessing different stream types and additional information.
  */
- declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+ declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1868,57 +1868,22 @@ Callback that is called for each chunk of the stream. The stream processing will
  Callback that is called when the LLM response and all request tool executions
  (for tools that have an `execute` function) are finished.
  */
- onFinish?: (event: {
+ onFinish?: (event: StepResult<TOOLS> & {
  /**
- The reason why the generation finished.
- */
- finishReason: FinishReason;
- /**
- The token usage of the generated response.
- */
- usage: LanguageModelUsage$1;
- /**
- The full text that has been generated.
- */
- text: string;
- /**
- The tool calls that have been executed.
- */
- toolCalls?: ToToolCall<TOOLS>[];
- /**
- The tool results that have been generated.
- */
- toolResults?: ToToolResult<TOOLS>[];
- /**
- Optional raw response data.
-
- @deprecated Use `response` instead.
- */
- rawResponse?: {
- /**
- Response headers.
- */
- headers?: Record<string, string>;
- };
- /**
- Response metadata.
- */
- response: LanguageModelResponseMetadataWithHeaders;
- /**
- Details for all steps.
- */
+ Details for all steps.
+ */
  steps: StepResult<TOOLS>[];
  /**
- Warnings from the model provider (e.g. unsupported settings).
- */
- warnings?: CallWarning[];
- /**
- Additional provider-specific metadata. They are passed through
- from the provider to the AI SDK and enable provider-specific
- results that can be fully encapsulated in the provider.
- */
+ Additional provider-specific metadata. They are passed through
+ from the provider to the AI SDK and enable provider-specific
+ results that can be fully encapsulated in the provider.
+ */
  readonly experimental_providerMetadata: ProviderMetadata | undefined;
  }) => Promise<void> | void;
+ /**
+ Callback that is called when each step (LLM call) is finished, including intermediate steps.
+ */
+ onStepFinish?: (event: StepResult<TOOLS>) => Promise<void> | void;
  /**
  * Internal. For test use only. May change without notice.
  */
package/dist/index.js CHANGED
@@ -3737,6 +3737,7 @@ async function streamText({
  experimental_toolCallStreaming: toolCallStreaming = false,
  onChunk,
  onFinish,
+ onStepFinish,
  _internal: {
  now: now2 = now,
  generateId: generateId3 = originalGenerateId4,
@@ -3869,6 +3870,7 @@ async function streamText({
  rawResponse,
  onChunk,
  onFinish,
+ onStepFinish,
  rootSpan,
  doStreamSpan,
  telemetry,
@@ -3891,6 +3893,7 @@ var DefaultStreamTextResult = class {
  rawResponse,
  onChunk,
  onFinish,
+ onStepFinish,
  rootSpan,
  doStreamSpan,
  telemetry,
@@ -4043,26 +4046,7 @@ var DefaultStreamTextResult = class {
  },
  // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
  async flush(controller) {
- controller.enqueue({
- type: "step-finish",
- finishReason: stepFinishReason,
- usage: stepUsage,
- experimental_providerMetadata: stepProviderMetadata,
- logprobs: stepLogProbs,
- response: stepResponse
- });
- stepResults.push({
- text: stepText,
- toolCalls: stepToolCalls,
- toolResults: stepToolResults,
- finishReason: stepFinishReason,
- usage: stepUsage,
- warnings: self.warnings,
- logprobs: stepLogProbs,
- response: stepResponse,
- rawResponse: self.rawResponse
- });
- const telemetryToolCalls = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
+ const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
  try {
  doStreamSpan2.setAttributes(
  selectTelemetryAttributes({
@@ -4071,7 +4055,7 @@ var DefaultStreamTextResult = class {
  "ai.response.finishReason": stepFinishReason,
  "ai.response.text": { output: () => stepText },
  "ai.response.toolCalls": {
- output: () => telemetryToolCalls
+ output: () => stepToolCallsJson
  },
  "ai.response.id": stepResponse.id,
  "ai.response.model": stepResponse.modelId,
@@ -4082,7 +4066,7 @@ var DefaultStreamTextResult = class {
  "ai.finishReason": stepFinishReason,
  "ai.result.text": { output: () => stepText },
  "ai.result.toolCalls": {
- output: () => telemetryToolCalls
+ output: () => stepToolCallsJson
  },
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [stepFinishReason],
@@ -4097,6 +4081,27 @@ var DefaultStreamTextResult = class {
  } finally {
  doStreamSpan2.end();
  }
+ controller.enqueue({
+ type: "step-finish",
+ finishReason: stepFinishReason,
+ usage: stepUsage,
+ experimental_providerMetadata: stepProviderMetadata,
+ logprobs: stepLogProbs,
+ response: stepResponse
+ });
+ const stepResult = {
+ text: stepText,
+ toolCalls: stepToolCalls,
+ toolResults: stepToolResults,
+ finishReason: stepFinishReason,
+ usage: stepUsage,
+ warnings: self.warnings,
+ logprobs: stepLogProbs,
+ response: stepResponse,
+ rawResponse: self.rawResponse
+ };
+ stepResults.push(stepResult);
+ await (onStepFinish == null ? void 0 : onStepFinish(stepResult));
  const combinedUsage = {
  promptTokens: usage.promptTokens + stepUsage.promptTokens,
  completionTokens: usage.completionTokens + stepUsage.completionTokens,
@@ -4154,7 +4159,7 @@ var DefaultStreamTextResult = class {
  "ai.response.finishReason": stepFinishReason,
  "ai.response.text": { output: () => stepText },
  "ai.response.toolCalls": {
- output: () => telemetryToolCalls
+ output: () => stepToolCallsJson
  },
  "ai.usage.promptTokens": combinedUsage.promptTokens,
  "ai.usage.completionTokens": combinedUsage.completionTokens,
@@ -4162,7 +4167,7 @@ var DefaultStreamTextResult = class {
  "ai.finishReason": stepFinishReason,
  "ai.result.text": { output: () => stepText },
  "ai.result.toolCalls": {
- output: () => telemetryToolCalls
+ output: () => stepToolCallsJson
  }
  }
  })
@@ -4180,6 +4185,7 @@ var DefaultStreamTextResult = class {
  resolveSteps(stepResults);
  await (onFinish == null ? void 0 : onFinish({
  finishReason: stepFinishReason,
+ logprobs: stepLogProbs,
  usage: combinedUsage,
  text: stepText,
  toolCalls: stepToolCalls,
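
Net effect of the index.js hunks: the `step-finish` stream part and the `stepResults` entry are now emitted after the telemetry span closes, the step object is built once and shared with the new callback (`await onStepFinish?.(stepResult)` in untranspiled form), and `onFinish` additionally receives `logprobs`. A sketch of the resulting per-step behavior with tool roundtrips; the tool, schema, and model below are illustrative, not part of the package:

```ts
import { streamText, tool } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider for illustration
import { z } from 'zod';

const result = await streamText({
  model: openai('gpt-4o'), // hypothetical model choice
  maxSteps: 3, // allow tool-call roundtrips
  tools: {
    // Made-up tool for this sketch.
    weather: tool({
      description: 'Get the weather for a city',
      parameters: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, tempC: 21 }),
    }),
  },
  prompt: 'What is the weather in Berlin?',
  // Expected to fire twice here: once for the tool-call step,
  // once for the final answer step.
  onStepFinish(step) {
    console.log(step.toolCalls.length, 'tool call(s);', step.finishReason);
  },
  onFinish(event) {
    console.log('total steps:', event.steps.length);
  },
});

for await (const _ of result.textStream) {
  // drain the stream so the callbacks run
}
```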