ai 4.1.21 → 4.1.23

This diff compares publicly released versions of the package as they appear in their public registry and is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
  # ai

+ ## 4.1.23
+
+ ### Patch Changes
+
+ - b08f7c1: fix (ai/core): suppress errors in textStream
+
+ ## 4.1.22
+
+ ### Patch Changes
+
+ - 2bec72a: feat (ai/core): add onError callback to streamText
+
  ## 4.1.21

  ### Patch Changes
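
Together these two patches change how stream errors surface: 4.1.22 adds an `onError` callback to `streamText`, and 4.1.23 stops propagating error parts through `textStream`. A minimal usage sketch of the new callback (the `@ai-sdk/openai` provider and model name are illustrative assumptions, not part of this diff):

```ts
import { streamText } from "ai";
import { openai } from "@ai-sdk/openai"; // any provider works; this one is an assumption

const result = streamText({
  model: openai("gpt-4o-mini"),
  prompt: "Invent a new holiday and describe its traditions.",
  // Added in 4.1.22: invoked for each error part in the stream.
  onError({ error }) {
    console.error("stream error:", error);
  },
});

// As of 4.1.23, error parts no longer abort iteration over textStream.
for await (const textDelta of result.textStream) {
  process.stdout.write(textDelta);
}
```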
package/dist/index.d.mts CHANGED
@@ -2322,6 +2322,7 @@ If set and supported by the model, calls will generate deterministic results.
  @param experimental_generateMessageId - Generate a unique ID for each message.

  @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
+ @param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
  @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
  @param onFinish - Callback that is called when the LLM response and all request tool executions
  (for tools that have an `execute` function) are finished.
@@ -2329,7 +2330,7 @@ If set and supported by the model, calls will generate deterministic results.
  @return
  A result object for accessing different stream types and additional information.
  */
- declare function streamText<TOOLS extends ToolSet, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata, providerOptions, experimental_toolCallStreaming, toolCallStreaming, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+ declare function streamText<TOOLS extends ToolSet, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata, providerOptions, experimental_toolCallStreaming, toolCallStreaming, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, onChunk, onError, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -2410,6 +2411,12 @@ Callback that is called for each chunk of the stream. The stream processing will
  }>;
  }) => Promise<void> | void;
  /**
+ Callback that is invoked when an error occurs during streaming. You can use it to log errors.
+ */
+ onError?: (event: {
+ error: unknown;
+ }) => Promise<void> | void;
+ /**
  Callback that is called when the LLM response and all request tool executions
  (for tools that have an `execute` function) are finished.

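
The declaration above types the event payload as `{ error: unknown }`, so handlers must narrow the error themselves. A sketch of a standalone handler conforming to this signature (the logging is illustrative):

```ts
// Conforms to: onError?: (event: { error: unknown }) => Promise<void> | void
const onError = ({ error }: { error: unknown }): void => {
  // The payload is deliberately `unknown`; narrow before use.
  const message = error instanceof Error ? error.message : String(error);
  console.error(`streamText error: ${message}`);
};
```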
package/dist/index.d.ts CHANGED
@@ -2322,6 +2322,7 @@ If set and supported by the model, calls will generate deterministic results.
  @param experimental_generateMessageId - Generate a unique ID for each message.

  @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
+ @param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
  @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
  @param onFinish - Callback that is called when the LLM response and all request tool executions
  (for tools that have an `execute` function) are finished.
@@ -2329,7 +2330,7 @@ If set and supported by the model, calls will generate deterministic results.
  @return
  A result object for accessing different stream types and additional information.
  */
- declare function streamText<TOOLS extends ToolSet, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata, providerOptions, experimental_toolCallStreaming, toolCallStreaming, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+ declare function streamText<TOOLS extends ToolSet, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata, providerOptions, experimental_toolCallStreaming, toolCallStreaming, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, onChunk, onError, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -2410,6 +2411,12 @@ Callback that is called for each chunk of the stream. The stream processing will
  }>;
  }) => Promise<void> | void;
  /**
+ Callback that is invoked when an error occurs during streaming. You can use it to log errors.
+ */
+ onError?: (event: {
+ error: unknown;
+ }) => Promise<void> | void;
+ /**
  Callback that is called when the LLM response and all request tool executions
  (for tools that have an `execute` function) are finished.

package/dist/index.js CHANGED
@@ -4701,6 +4701,7 @@ function streamText({
  experimental_repairToolCall: repairToolCall,
  experimental_transform: transform,
  onChunk,
+ onError,
  onFinish,
  onStepFinish,
  _internal: {
@@ -4731,6 +4732,7 @@ function streamText({
  continueSteps,
  providerOptions,
  onChunk,
+ onError,
  onFinish,
  onStepFinish,
  now: now2,
@@ -4812,6 +4814,7 @@ var DefaultStreamTextResult = class {
  continueSteps,
  providerOptions,
  onChunk,
+ onError,
  onFinish,
  onStepFinish,
  now: now2,
@@ -4863,6 +4866,9 @@ var DefaultStreamTextResult = class {
  if (part.type === "text-delta" || part.type === "reasoning" || part.type === "tool-call" || part.type === "tool-result" || part.type === "tool-call-streaming-start" || part.type === "tool-call-delta") {
  await (onChunk == null ? void 0 : onChunk({ chunk: part }));
  }
+ if (part.type === "error") {
+ await (onError == null ? void 0 : onError({ error: part.error }));
+ }
  if (part.type === "text-delta") {
  recordedStepText += part.textDelta;
  recordedContinuationText += part.textDelta;
@@ -5498,8 +5504,6 @@ var DefaultStreamTextResult = class {
  transform({ part }, controller) {
  if (part.type === "text-delta") {
  controller.enqueue(part.textDelta);
- } else if (part.type === "error") {
- controller.error(part.error);
  }
  }
  })
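
The final hunk is the behavioral crux of 4.1.23: the `textStream` transform no longer forwards error parts via `controller.error`, so iterating `textStream` no longer throws when the underlying stream emits an error. A `try`/`catch` around the loop therefore stops observing these errors; handle them in `onError` instead, or consume `fullStream`, where error parts are still emitted as data. A sketch of the adjustment (assuming `result` comes from a `streamText` call like the one above):

```ts
// Before 4.1.23: an error part rejected the textStream iterator,
// so this catch block was the place to handle stream errors.
try {
  for await (const delta of result.textStream) {
    process.stdout.write(delta);
  }
} catch (error) {
  // As of 4.1.23, stream error parts no longer land here.
}

// After 4.1.23: observe errors via onError (see above), or read
// fullStream, which still carries error parts as typed chunks.
for await (const part of result.fullStream) {
  if (part.type === "text-delta") {
    process.stdout.write(part.textDelta);
  } else if (part.type === "error") {
    console.error("error part:", part.error);
  }
}
```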