ai 3.3.37 → 3.3.39

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,18 @@
  # ai

+ ## 3.3.39
+
+ ### Patch Changes
+
+ - 33cf3e1: feat (ai/core): add providerMetadata to StepResult
+ - 17ee757: feat (ai/core): add onStepFinish callback to generateText
+
+ ## 3.3.38
+
+ ### Patch Changes
+
+ - 83da52c: feat (ai/core): add onStepFinish callback to streamText
+
  ## 3.3.37

  ### Patch Changes
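
For context, a minimal sketch of how the new `onStepFinish` callback and the step-level `experimental_providerMetadata` field introduced in 3.3.38/3.3.39 could be used with `generateText`. The `@ai-sdk/openai` import, the model id, and the `weather` tool are illustrative assumptions and are not part of this package change.

```ts
import { generateText, tool } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package; not part of this diff
import { z } from 'zod';

const { text } = await generateText({
  model: openai('gpt-4o-mini'), // illustrative model id
  maxSteps: 3,
  tools: {
    weather: tool({
      description: 'Get the weather for a city',
      parameters: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, tempC: 21 }), // stubbed result for illustration
    }),
  },
  prompt: 'What is the weather in Berlin?',
  // New in 3.3.39: fires after every step (LLM call), including intermediate tool-call steps.
  onStepFinish(step) {
    console.log(step.finishReason, step.usage.totalTokens, step.toolCalls.length);
    // Also new in 3.3.39: provider-specific metadata on each StepResult (may be undefined).
    console.log(step.experimental_providerMetadata);
  },
});
```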
package/dist/index.d.mts CHANGED
@@ -1356,6 +1356,12 @@ type StepResult<TOOLS extends Record<string, CoreTool>> = {
  Additional response information.
  */
  readonly response: LanguageModelResponseMetadataWithHeaders;
+ /**
+ Additional provider-specific metadata. They are passed through
+ from the provider to the AI SDK and enable provider-specific
+ results that can be fully encapsulated in the provider.
+ */
+ readonly experimental_providerMetadata: ProviderMetadata | undefined;
  };

  /**
@@ -1479,10 +1485,12 @@ If set and supported by the model, calls will generate deterministic results.

  @param maxSteps - Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.

+ @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
+
  @returns
  A result object that contains the generated text, the results of the tool calls, and additional information.
  */
- declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, _internal: { generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+ declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1532,6 +1540,10 @@ to the provider from the AI SDK and enable provider-specific
  functionality that can be fully encapsulated in the provider.
  */
  experimental_providerMetadata?: ProviderMetadata;
+ /**
+ Callback that is called when each step (LLM call) is finished, including intermediate steps.
+ */
+ onStepFinish?: (event: StepResult<TOOLS>) => Promise<void> | void;
  /**
  * Internal. For test use only. May change without notice.
  */
@@ -1800,13 +1812,14 @@ If set and supported by the model, calls will generate deterministic results.
  @param maxSteps - Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.

  @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
+ @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
  @param onFinish - Callback that is called when the LLM response and all request tool executions
  (for tools that have an `execute` function) are finished.

  @return
  A result object for accessing different stream types and additional information.
  */
- declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+ declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1867,58 +1880,19 @@ Callback that is called for each chunk of the stream. The stream processing will
  /**
  Callback that is called when the LLM response and all request tool executions
  (for tools that have an `execute` function) are finished.
+
+ The usage is the combined usage of all steps.
  */
- onFinish?: (event: {
+ onFinish?: (event: StepResult<TOOLS> & {
  /**
- The reason why the generation finished.
- */
- finishReason: FinishReason;
- /**
- The token usage of the generated response.
- */
- usage: LanguageModelUsage$1;
- /**
- The full text that has been generated.
- */
- text: string;
- /**
- The tool calls that have been executed.
- */
- toolCalls?: ToToolCall<TOOLS>[];
- /**
- The tool results that have been generated.
- */
- toolResults?: ToToolResult<TOOLS>[];
- /**
- Optional raw response data.
-
- @deprecated Use `response` instead.
- */
- rawResponse?: {
- /**
- Response headers.
- */
- headers?: Record<string, string>;
- };
- /**
- Response metadata.
- */
- response: LanguageModelResponseMetadataWithHeaders;
- /**
- Details for all steps.
- */
+ Details for all steps.
+ */
  steps: StepResult<TOOLS>[];
- /**
- Warnings from the model provider (e.g. unsupported settings).
- */
- warnings?: CallWarning[];
- /**
- Additional provider-specific metadata. They are passed through
- from the provider to the AI SDK and enable provider-specific
- results that can be fully encapsulated in the provider.
- */
- readonly experimental_providerMetadata: ProviderMetadata | undefined;
  }) => Promise<void> | void;
+ /**
+ Callback that is called when each step (LLM call) is finished, including intermediate steps.
+ */
+ onStepFinish?: (event: StepResult<TOOLS>) => Promise<void> | void;
  /**
  * Internal. For test use only. May change without notice.
  */
package/dist/index.d.ts CHANGED
@@ -1356,6 +1356,12 @@ type StepResult<TOOLS extends Record<string, CoreTool>> = {
  Additional response information.
  */
  readonly response: LanguageModelResponseMetadataWithHeaders;
+ /**
+ Additional provider-specific metadata. They are passed through
+ from the provider to the AI SDK and enable provider-specific
+ results that can be fully encapsulated in the provider.
+ */
+ readonly experimental_providerMetadata: ProviderMetadata | undefined;
  };

  /**
@@ -1479,10 +1485,12 @@ If set and supported by the model, calls will generate deterministic results.

  @param maxSteps - Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.

+ @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
+
  @returns
  A result object that contains the generated text, the results of the tool calls, and additional information.
  */
- declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, _internal: { generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+ declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1532,6 +1540,10 @@ to the provider from the AI SDK and enable provider-specific
  functionality that can be fully encapsulated in the provider.
  */
  experimental_providerMetadata?: ProviderMetadata;
+ /**
+ Callback that is called when each step (LLM call) is finished, including intermediate steps.
+ */
+ onStepFinish?: (event: StepResult<TOOLS>) => Promise<void> | void;
  /**
  * Internal. For test use only. May change without notice.
  */
@@ -1800,13 +1812,14 @@ If set and supported by the model, calls will generate deterministic results.
  @param maxSteps - Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.

  @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
+ @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
  @param onFinish - Callback that is called when the LLM response and all request tool executions
  (for tools that have an `execute` function) are finished.

  @return
  A result object for accessing different stream types and additional information.
  */
- declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+ declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1867,58 +1880,19 @@ Callback that is called for each chunk of the stream. The stream processing will
  /**
  Callback that is called when the LLM response and all request tool executions
  (for tools that have an `execute` function) are finished.
+
+ The usage is the combined usage of all steps.
  */
- onFinish?: (event: {
+ onFinish?: (event: StepResult<TOOLS> & {
  /**
- The reason why the generation finished.
- */
- finishReason: FinishReason;
- /**
- The token usage of the generated response.
- */
- usage: LanguageModelUsage$1;
- /**
- The full text that has been generated.
- */
- text: string;
- /**
- The tool calls that have been executed.
- */
- toolCalls?: ToToolCall<TOOLS>[];
- /**
- The tool results that have been generated.
- */
- toolResults?: ToToolResult<TOOLS>[];
- /**
- Optional raw response data.
-
- @deprecated Use `response` instead.
- */
- rawResponse?: {
- /**
- Response headers.
- */
- headers?: Record<string, string>;
- };
- /**
- Response metadata.
- */
- response: LanguageModelResponseMetadataWithHeaders;
- /**
- Details for all steps.
- */
+ Details for all steps.
+ */
  steps: StepResult<TOOLS>[];
- /**
- Warnings from the model provider (e.g. unsupported settings).
- */
- warnings?: CallWarning[];
- /**
- Additional provider-specific metadata. They are passed through
- from the provider to the AI SDK and enable provider-specific
- results that can be fully encapsulated in the provider.
- */
- readonly experimental_providerMetadata: ProviderMetadata | undefined;
  }) => Promise<void> | void;
+ /**
+ Callback that is called when each step (LLM call) is finished, including intermediate steps.
+ */
+ onStepFinish?: (event: StepResult<TOOLS>) => Promise<void> | void;
  /**
  * Internal. For test use only. May change without notice.
  */
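
A corresponding sketch for `streamText`, based on the updated declarations above: `onStepFinish` receives each `StepResult`, and the `onFinish` event now extends `StepResult` with a `steps` array and usage combined across all steps. The provider import and model id are again illustrative assumptions, not part of this package change.

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package; not part of this diff

const result = await streamText({
  model: openai('gpt-4o-mini'), // illustrative model id
  maxSteps: 2,
  prompt: 'Write a haiku about changelogs.',
  // New in 3.3.38: called after each step (LLM call), including intermediate steps.
  onStepFinish(step) {
    console.log('step finished:', step.finishReason, step.usage);
  },
  // The onFinish event is now StepResult<TOOLS> & { steps }, with combined usage across steps.
  onFinish(event) {
    console.log('steps:', event.steps.length, 'total tokens:', event.usage.totalTokens);
  },
});

for await (const delta of result.textStream) {
  process.stdout.write(delta);
}
```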
package/dist/index.js CHANGED
@@ -3059,6 +3059,7 @@ async function generateText({
  generateId: generateId3 = originalGenerateId3,
  currentDate = () => /* @__PURE__ */ new Date()
  } = {},
+ onStepFinish,
  ...settings
  }) {
  var _a11;
@@ -3220,7 +3221,7 @@ async function generateText({
  usage.completionTokens += currentUsage.completionTokens;
  usage.promptTokens += currentUsage.promptTokens;
  usage.totalTokens += currentUsage.totalTokens;
- steps.push({
+ const currentStep = {
  text: (_b = currentModelResponse.text) != null ? _b : "",
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
@@ -3231,8 +3232,11 @@ async function generateText({
  response: {
  ...currentModelResponse.response,
  headers: (_c = currentModelResponse.rawResponse) == null ? void 0 : _c.headers
- }
- });
+ },
+ experimental_providerMetadata: currentModelResponse.providerMetadata
+ };
+ steps.push(currentStep);
+ await (onStepFinish == null ? void 0 : onStepFinish(currentStep));
  const newResponseMessages = toResponseMessages({
  text: currentModelResponse.text,
  toolCalls: currentToolCalls,
@@ -3737,6 +3741,7 @@ async function streamText({
  experimental_toolCallStreaming: toolCallStreaming = false,
  onChunk,
  onFinish,
+ onStepFinish,
  _internal: {
  now: now2 = now,
  generateId: generateId3 = originalGenerateId4,
@@ -3869,6 +3874,7 @@ async function streamText({
  rawResponse,
  onChunk,
  onFinish,
+ onStepFinish,
  rootSpan,
  doStreamSpan,
  telemetry,
@@ -3891,6 +3897,7 @@ var DefaultStreamTextResult = class {
  rawResponse,
  onChunk,
  onFinish,
+ onStepFinish,
  rootSpan,
  doStreamSpan,
  telemetry,
@@ -4043,26 +4050,7 @@ var DefaultStreamTextResult = class {
  },
  // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
  async flush(controller) {
- controller.enqueue({
- type: "step-finish",
- finishReason: stepFinishReason,
- usage: stepUsage,
- experimental_providerMetadata: stepProviderMetadata,
- logprobs: stepLogProbs,
- response: stepResponse
- });
- stepResults.push({
- text: stepText,
- toolCalls: stepToolCalls,
- toolResults: stepToolResults,
- finishReason: stepFinishReason,
- usage: stepUsage,
- warnings: self.warnings,
- logprobs: stepLogProbs,
- response: stepResponse,
- rawResponse: self.rawResponse
- });
- const telemetryToolCalls = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
+ const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
  try {
  doStreamSpan2.setAttributes(
  selectTelemetryAttributes({
@@ -4071,7 +4059,7 @@ var DefaultStreamTextResult = class {
  "ai.response.finishReason": stepFinishReason,
  "ai.response.text": { output: () => stepText },
  "ai.response.toolCalls": {
- output: () => telemetryToolCalls
+ output: () => stepToolCallsJson
  },
  "ai.response.id": stepResponse.id,
  "ai.response.model": stepResponse.modelId,
@@ -4082,7 +4070,7 @@ var DefaultStreamTextResult = class {
  "ai.finishReason": stepFinishReason,
  "ai.result.text": { output: () => stepText },
  "ai.result.toolCalls": {
- output: () => telemetryToolCalls
+ output: () => stepToolCallsJson
  },
  // standardized gen-ai llm span attributes:
  "gen_ai.response.finish_reasons": [stepFinishReason],
@@ -4097,6 +4085,28 @@ var DefaultStreamTextResult = class {
  } finally {
  doStreamSpan2.end();
  }
+ controller.enqueue({
+ type: "step-finish",
+ finishReason: stepFinishReason,
+ usage: stepUsage,
+ experimental_providerMetadata: stepProviderMetadata,
+ logprobs: stepLogProbs,
+ response: stepResponse
+ });
+ const stepResult = {
+ text: stepText,
+ toolCalls: stepToolCalls,
+ toolResults: stepToolResults,
+ finishReason: stepFinishReason,
+ usage: stepUsage,
+ warnings: self.warnings,
+ logprobs: stepLogProbs,
+ response: stepResponse,
+ rawResponse: self.rawResponse,
+ experimental_providerMetadata: stepProviderMetadata
+ };
+ stepResults.push(stepResult);
+ await (onStepFinish == null ? void 0 : onStepFinish(stepResult));
  const combinedUsage = {
  promptTokens: usage.promptTokens + stepUsage.promptTokens,
  completionTokens: usage.completionTokens + stepUsage.completionTokens,
@@ -4154,7 +4164,7 @@ var DefaultStreamTextResult = class {
  "ai.response.finishReason": stepFinishReason,
  "ai.response.text": { output: () => stepText },
  "ai.response.toolCalls": {
- output: () => telemetryToolCalls
+ output: () => stepToolCallsJson
  },
  "ai.usage.promptTokens": combinedUsage.promptTokens,
  "ai.usage.completionTokens": combinedUsage.completionTokens,
@@ -4162,7 +4172,7 @@ var DefaultStreamTextResult = class {
  "ai.finishReason": stepFinishReason,
  "ai.result.text": { output: () => stepText },
  "ai.result.toolCalls": {
- output: () => telemetryToolCalls
+ output: () => stepToolCallsJson
  }
  }
  })
@@ -4180,6 +4190,7 @@ var DefaultStreamTextResult = class {
  resolveSteps(stepResults);
  await (onFinish == null ? void 0 : onFinish({
  finishReason: stepFinishReason,
+ logprobs: stepLogProbs,
  usage: combinedUsage,
  text: stepText,
  toolCalls: stepToolCalls,
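
As the implementation changes above show, both `generateText` and `streamText` now `await` the `onStepFinish` result before continuing, so an async callback is a reasonable place to persist per-step data. A minimal sketch; the `saveStep` helper, provider import, and model id are hypothetical and only for illustration.

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package; not part of this diff

// Hypothetical persistence helper, stubbed for illustration.
async function saveStep(step: { text: string; finishReason: string }) {
  console.log('persisting step:', step.finishReason, step.text.slice(0, 40));
}

await generateText({
  model: openai('gpt-4o-mini'), // illustrative model id
  maxSteps: 3,
  prompt: 'Summarize this changelog in one sentence.',
  // The SDK awaits this promise before starting the next step (see steps.push / await above).
  onStepFinish: async (step) => {
    await saveStep(step);
  },
});
```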