ai 5.0.0-canary.12 → 5.0.0-canary.14

This diff compares the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
@@ -1,4 +1,4 @@
- import { SharedV2ProviderOptions, LanguageModelV2Usage, LanguageModelV2Source, JSONValue as JSONValue$1, JSONObject, LanguageModelV2FunctionTool, LanguageModelV2ProviderDefinedTool, LanguageModelV2ToolChoice, LanguageModelV2Prompt } from '@ai-sdk/provider';
+ import { SharedV2ProviderOptions, LanguageModelV2Source, LanguageModelV2Usage, LanguageModelV2FinishReason, JSONValue as JSONValue$1, JSONObject, LanguageModelV2FunctionTool, LanguageModelV2ProviderDefinedTool, LanguageModelV2ToolChoice, LanguageModelV2Prompt } from '@ai-sdk/provider';
  import { z } from 'zod';
  import { ToolCall, ToolResult, Validator } from '@ai-sdk/provider-utils';
  import { JSONSchema7 } from 'json-schema';
@@ -117,26 +117,6 @@ interface ReasoningPart {
  */
  text: string;
  /**
- An optional signature for verifying that the reasoning originated from the model.
- */
- signature?: string;
- /**
- Additional provider-specific metadata. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
- }
- /**
- Redacted reasoning content part of a prompt.
- */
- interface RedactedReasoningPart {
- type: 'redacted-reasoning';
- /**
- Redacted reasoning data.
- */
- data: string;
- /**
  Additional provider-specific metadata. They are passed through
  to the provider from the AI SDK and enable provider-specific
  functionality that can be fully encapsulated in the provider.
@@ -251,7 +231,7 @@ type CoreAssistantMessage = {
  Content of an assistant message.
  It can be a string or an array of text, image, reasoning, redacted reasoning, and tool call parts.
  */
- type AssistantContent = string | Array<TextPart | FilePart | ReasoningPart | RedactedReasoningPart | ToolCallPart>;
+ type AssistantContent = string | Array<TextPart | FilePart | ReasoningPart | ToolCallPart>;
  /**
  A tool message. It contains the result of one or more tool calls.
  */
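
With `RedactedReasoningPart` removed, the `AssistantContent` union now only accepts text, file, reasoning, and tool-call parts. A minimal sketch of a conforming value under the new union; the `type` discriminators and the import path are assumptions based on the surrounding declarations, not something this hunk shows:

// Sketch only: AssistantContent as declared after this change; the part
// shapes follow the TextPart/ReasoningPart declarations in this file.
import type { AssistantContent } from 'ai'; // assumed entry point

const content: AssistantContent = [
  // 'redacted-reasoning' parts no longer exist; provider-specific reasoning
  // data travels via providerOptions / providerMetadata instead.
  { type: 'reasoning', text: 'Weighing the options before answering.' },
  { type: 'text', text: 'Here is the final answer.' },
];
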
@@ -275,6 +255,10 @@ It can be a user message, an assistant message, or a tool message.
  */
  type CoreMessage = CoreSystemMessage | CoreUserMessage | CoreAssistantMessage | CoreToolMessage;

+ /**
+ A source that has been used as input to generate the response.
+ */
+ type Source = LanguageModelV2Source;
  /**
  Tool choice for the generation. It supports the following settings:

@@ -418,14 +402,10 @@ type ReasoningUIPart = {
  * The reasoning text.
  */
  reasoning: string;
- details: Array<{
- type: 'text';
- text: string;
- signature?: string;
- } | {
- type: 'redacted';
- data: string;
- }>;
+ /**
+ * The provider metadata.
+ */
+ providerMetadata?: Record<string, any>;
  };
  /**
  * A tool invocation part of a message.
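
`ReasoningUIPart` loses its `details` array (including the `signature` and `redacted` variants) in favor of a single optional `providerMetadata` record. A sketch of the updated shape, with the `type: 'reasoning'` discriminator assumed from the part of the declaration not shown in this hunk:

// Illustrative object literal matching the updated ReasoningUIPart fields.
const reasoningPart = {
  type: 'reasoning' as const,
  reasoning: 'The model walked through the problem here.',
  // Free-form, provider-specific payload; the exact keys depend on the provider.
  providerMetadata: { exampleProvider: { signature: 'opaque-value' } },
};
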
@@ -477,6 +457,81 @@ type JSONValue = null | string | number | boolean | {
  [value: string]: JSONValue;
  } | Array<JSONValue>;

+ type DataStreamString = `${(typeof DataStreamStringPrefixes)[keyof typeof DataStreamStringPrefixes]}:${string}\n`;
+ interface DataStreamPart<CODE extends string, NAME extends string, TYPE> {
+ code: CODE;
+ name: NAME;
+ parse: (value: JSONValue) => {
+ type: NAME;
+ value: TYPE;
+ };
+ }
+ declare const dataStreamParts: readonly [DataStreamPart<"0", "text", string>, DataStreamPart<"2", "data", JSONValue[]>, DataStreamPart<"3", "error", string>, DataStreamPart<"8", "message_annotations", JSONValue[]>, DataStreamPart<"9", "tool_call", ToolCall<string, any>>, DataStreamPart<"a", "tool_result", Omit<ToolResult<string, any, any>, "toolName" | "args">>, DataStreamPart<"b", "tool_call_streaming_start", {
+ toolCallId: string;
+ toolName: string;
+ }>, DataStreamPart<"c", "tool_call_delta", {
+ toolCallId: string;
+ argsTextDelta: string;
+ }>, DataStreamPart<"d", "finish_message", {
+ finishReason: LanguageModelV2FinishReason;
+ usage?: {
+ promptTokens: number;
+ completionTokens: number;
+ };
+ }>, DataStreamPart<"e", "finish_step", {
+ isContinued: boolean;
+ finishReason: LanguageModelV2FinishReason;
+ usage?: {
+ promptTokens: number;
+ completionTokens: number;
+ };
+ }>, DataStreamPart<"f", "start_step", {
+ messageId: string;
+ }>, DataStreamPart<"g", "reasoning", {
+ text: string;
+ providerMetadata?: Record<string, any> | undefined;
+ }>, DataStreamPart<"h", "source", LanguageModelV2Source>, DataStreamPart<"l", "reasoning_part_finish", {}>, DataStreamPart<"k", "file", {
+ data: string;
+ mimeType: string;
+ }>];
+ type DataStreamParts = (typeof dataStreamParts)[number];
+ /**
+ * Maps the type of a stream part to its value type.
+ */
+ type DataStreamPartValueType = {
+ [P in DataStreamParts as P['name']]: ReturnType<P['parse']>['value'];
+ };
+ /**
+ * The map of prefixes for data in the stream
+ *
+ * - 0: Text from the LLM response
+ * - 1: (OpenAI) function_call responses
+ * - 2: custom JSON added by the user using `Data`
+ * - 6: (OpenAI) tool_call responses
+ *
+ * Example:
+ * ```
+ * 0:Vercel
+ * 0:'s
+ * 0: AI
+ * 0: AI
+ * 0: SDK
+ * 0: is great
+ * 0:!
+ * 2: { "someJson": "value" }
+ * 1: {"function_call": {"name": "get_current_weather", "arguments": "{\\n\\"location\\": \\"Charlottesville, Virginia\\",\\n\\"format\\": \\"celsius\\"\\n}"}}
+ * 6: {"tool_call": {"id": "tool_0", "type": "function", "function": {"name": "get_current_weather", "arguments": "{\\n\\"location\\": \\"Charlottesville, Virginia\\",\\n\\"format\\": \\"celsius\\"\\n}"}}}
+ *```
+ */
+ declare const DataStreamStringPrefixes: { [K in DataStreamParts["name"]]: (typeof dataStreamParts)[number]["code"]; };
+ /**
+ Prepends a string with a prefix from the `StreamChunkPrefixes`, JSON-ifies it,
+ and appends a new line.
+
+ It ensures type-safety for the part type and value.
+ */
+ declare function formatDataStreamPart<T extends keyof DataStreamPartValueType>(type: T, value: DataStreamPartValueType[T]): DataStreamString;
+
  /**
  * Used to mark schemas so we can support both Zod and custom schemas.
  */
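
The block above brings the data stream wire protocol into this bundle: each `DataStreamString` line is a single-character prefix, a colon, a JSON payload, and a newline, and `formatDataStreamPart` builds such lines in a type-safe way. A minimal sketch of producing protocol lines; the `ai/internal` import path is an assumption, since these declarations live in the package's internal entry:

import { formatDataStreamPart } from 'ai/internal'; // assumed import path

// '0:"Hello"\n' — a text chunk
const textLine = formatDataStreamPart('text', 'Hello');

// 'g:{"text":"thinking..."}\n' — a reasoning chunk with optional provider metadata
const reasoningLine = formatDataStreamPart('reasoning', { text: 'thinking...' });

// 'd:{...}\n' — the finish_message part carrying the finish reason and usage
const finishLine = formatDataStreamPart('finish_message', {
  finishReason: 'stop',
  usage: { promptTokens: 12, completionTokens: 3 },
});
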
@@ -588,11 +643,6 @@ type Prompt = {
  };

  type StandardizedPrompt = {
- /**
- * Original prompt type. This is forwarded to the providers and can be used
- * to write send raw text to providers that support it.
- */
- type: 'prompt' | 'messages';
  /**
  * System message.
  */
@@ -605,7 +655,7 @@ type StandardizedPrompt = {
  declare function standardizePrompt<TOOLS extends ToolSet>({ prompt, tools, }: {
  prompt: Prompt;
  tools: undefined | TOOLS;
- }): StandardizedPrompt;
+ }): Promise<StandardizedPrompt>;

  type CallSettings = {
  /**
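
`standardizePrompt` is now asynchronous (it returns `Promise<StandardizedPrompt>`), and the standardized result no longer carries the `type: 'prompt' | 'messages'` discriminator. A sketch of how an internal caller has to change, assuming the helper and the surrounding types are in scope:

// Sketch only: the call shape follows the declaration above.
async function prepare<TOOLS extends ToolSet>(prompt: Prompt, tools: undefined | TOOLS) {
  // Previously synchronous; prompt validation now happens asynchronously.
  const standardized: StandardizedPrompt = await standardizePrompt({ prompt, tools });
  return standardized;
}
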
@@ -682,6 +732,52 @@ type CallSettings = {
  headers?: Record<string, string | undefined>;
  };

+ interface DataStreamWriter {
+ /**
+ * Appends a data part to the stream.
+ */
+ write(data: DataStreamString): void;
+ /**
+ * Appends a data part to the stream.
+ */
+ writeData(value: JSONValue$1): void;
+ /**
+ * Appends a message annotation to the stream.
+ */
+ writeMessageAnnotation(value: JSONValue$1): void;
+ /**
+ * Appends a source part to the stream.
+ */
+ writeSource(source: Source): void;
+ /**
+ * Merges the contents of another stream to this stream.
+ */
+ merge(stream: ReadableStream<DataStreamString>): void;
+ /**
+ * Error handler that is used by the data stream writer.
+ * This is intended for forwarding when merging streams
+ * to prevent duplicated error masking.
+ */
+ onError: ((error: unknown) => string) | undefined;
+ }
+
+ /**
+ * A stream wrapper to send custom JSON-encoded data back to the client.
+ *
+ * @deprecated Please use `createDataStream`, `createDataStreamResponse`, and `pipeDataStreamToResponse` instead.
+ */
+ declare class StreamData {
+ private encoder;
+ private controller;
+ stream: ReadableStream<Uint8Array>;
+ private isClosed;
+ private warningTimeout;
+ constructor();
+ close(): Promise<void>;
+ append(value: JSONValue): void;
+ appendMessageAnnotation(value: JSONValue): void;
+ }
+
  declare function prepareToolsAndToolChoice<TOOLS extends ToolSet>({ tools, toolChoice, activeTools, }: {
  tools: TOOLS | undefined;
  toolChoice: ToolChoice<TOOLS> | undefined;
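
`DataStreamWriter` and the deprecated `StreamData` class now live in this internal bundle. A minimal sketch of driving a writer with this interface through `createDataStreamResponse` (the replacement named in the deprecation note above); whether both helpers remain exported from the top-level `ai` entry in the canary builds is an assumption:

import { createDataStreamResponse, formatDataStreamPart } from 'ai'; // assumed exports

const response = createDataStreamResponse({
  execute(writer /* DataStreamWriter */) {
    writer.writeData({ status: 'started' });              // '2:' data part
    writer.writeMessageAnnotation({ traceId: 'abc123' }); // '8:' message annotation
    writer.write(formatDataStreamPart('text', 'Hello from the stream'));
  },
  // Forwarded when merging streams to avoid duplicated error masking.
  onError: (error) => 'An error occurred.',
});
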
@@ -723,10 +819,73 @@ declare function convertToLanguageModelPrompt({ prompt, supportedUrls, downloadI
  downloadImplementation?: typeof download;
  }): Promise<LanguageModelV2Prompt>;

+ /**
+ * Merges two readable streams into a single readable stream, emitting values
+ * from each stream as they become available.
+ *
+ * The first stream is prioritized over the second stream. If both streams have
+ * values available, the first stream's value is emitted first.
+ *
+ * @template VALUE1 - The type of values emitted by the first stream.
+ * @template VALUE2 - The type of values emitted by the second stream.
+ * @param {ReadableStream<VALUE1>} stream1 - The first readable stream.
+ * @param {ReadableStream<VALUE2>} stream2 - The second readable stream.
+ * @returns {ReadableStream<VALUE1 | VALUE2>} A new readable stream that emits values from both input streams.
+ */
+ declare function mergeStreams<VALUE1, VALUE2>(stream1: ReadableStream<VALUE1>, stream2: ReadableStream<VALUE2>): ReadableStream<VALUE1 | VALUE2>;
+
+ declare function prepareResponseHeaders(headers: HeadersInit | undefined, { contentType, dataStreamVersion, }: {
+ contentType: string;
+ dataStreamVersion?: 'v1' | undefined;
+ }): Headers;
+
+ /**
+ * Configuration options and helper callback methods for stream lifecycle events.
+ */
+ interface StreamCallbacks {
+ /** `onStart`: Called once when the stream is initialized. */
+ onStart?: () => Promise<void> | void;
+ /**
+ * `onCompletion`: Called for each tokenized message.
+ *
+ * @deprecated Use `onFinal` instead.
+ */
+ onCompletion?: (completion: string) => Promise<void> | void;
+ /** `onFinal`: Called once when the stream is closed with the final completion message. */
+ onFinal?: (completion: string) => Promise<void> | void;
+ /** `onToken`: Called for each tokenized message. */
+ onToken?: (token: string) => Promise<void> | void;
+ /** `onText`: Called for each text chunk. */
+ onText?: (text: string) => Promise<void> | void;
+ }
+ /**
+ * Creates a transform stream that encodes input messages and invokes optional callback functions.
+ * The transform stream uses the provided callbacks to execute custom logic at different stages of the stream's lifecycle.
+ * - `onStart`: Called once when the stream is initialized.
+ * - `onToken`: Called for each tokenized message.
+ * - `onCompletion`: Called every time a completion message is received. This can occur multiple times when using e.g. OpenAI functions
+ * - `onFinal`: Called once when the stream is closed with the final completion message.
+ *
+ * This function is useful when you want to process a stream of messages and perform specific actions during the stream's lifecycle.
+ *
+ * @param {StreamCallbacks} [callbacks] - An object containing the callback functions.
+ * @return {TransformStream<string, Uint8Array>} A transform stream that encodes input messages as Uint8Array and allows the execution of custom logic through callbacks.
+ *
+ * @example
+ * const callbacks = {
+ * onStart: async () => console.log('Stream started'),
+ * onToken: async (token) => console.log(`Token: ${token}`),
+ * onCompletion: async (completion) => console.log(`Completion: ${completion}`)
+ * onFinal: async () => data.close()
+ * };
+ * const transformer = createCallbacksTransformer(callbacks);
+ */
+ declare function createCallbacksTransformer(callbacks?: StreamCallbacks | undefined): TransformStream<string, Uint8Array>;
+
  /**
  * Warning time for notifying developers that a stream is hanging in dev mode
  * using a console.warn.
  */
  declare const HANGING_STREAM_WARNING_TIME_MS: number;

- export { HANGING_STREAM_WARNING_TIME_MS, calculateLanguageModelUsage, convertToLanguageModelPrompt, prepareCallSettings, prepareRetries, prepareToolsAndToolChoice, standardizePrompt };
+ export { DataStreamWriter, HANGING_STREAM_WARNING_TIME_MS, StreamCallbacks, StreamData, calculateLanguageModelUsage, convertToLanguageModelPrompt, createCallbacksTransformer, formatDataStreamPart, mergeStreams, prepareCallSettings, prepareResponseHeaders, prepareRetries, prepareToolsAndToolChoice, standardizePrompt };
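
The general-purpose stream helpers (`mergeStreams`, `prepareResponseHeaders`, `createCallbacksTransformer`) join the internal exports as well. A small self-contained sketch of the documented `mergeStreams` behavior, re-declaring the signature instead of assuming an import path:

// Mirrors the declared signature; the real implementation ships in the package.
declare function mergeStreams<VALUE1, VALUE2>(
  stream1: ReadableStream<VALUE1>,
  stream2: ReadableStream<VALUE2>,
): ReadableStream<VALUE1 | VALUE2>;

const numbers = new ReadableStream<number>({
  start(controller) {
    controller.enqueue(1);
    controller.enqueue(2);
    controller.close();
  },
});

const letters = new ReadableStream<string>({
  start(controller) {
    controller.enqueue('a');
    controller.close();
  },
});

// Per the JSDoc above, values from the first stream are emitted first
// whenever both streams have data available.
const merged: ReadableStream<number | string> = mergeStreams(numbers, letters);
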