ai 6.0.0-beta.61 → 6.0.0-beta.63

package/dist/index.d.ts CHANGED
@@ -1,6 +1,6 @@
1
1
  export { createGateway, gateway } from '@ai-sdk/gateway';
2
2
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
3
- import { Tool, InferToolInput, InferToolOutput, AssistantModelMessage, ToolModelMessage, ReasoningPart, ModelMessage, FlexibleSchema, InferSchema, SystemModelMessage, UserModelMessage, ProviderOptions, IdGenerator, ToolCall, DataContent, Resolvable, FetchFunction } from '@ai-sdk/provider-utils';
3
+ import { Tool, InferToolInput, InferToolOutput, AssistantModelMessage, ToolModelMessage, ReasoningPart, FlexibleSchema, InferSchema, ModelMessage, SystemModelMessage, UserModelMessage, ProviderOptions, IdGenerator, ToolCall, Resolvable, FetchFunction, DataContent } from '@ai-sdk/provider-utils';
4
4
  export { AssistantContent, AssistantModelMessage, DataContent, FilePart, FlexibleSchema, IdGenerator, ImagePart, InferSchema, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolApprovalRequest, ToolApprovalResponse, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, dynamicTool, generateId, jsonSchema, parseJsonEventStream, tool, zodSchema } from '@ai-sdk/provider-utils';
5
5
  import * as _ai_sdk_provider from '@ai-sdk/provider';
6
6
  import { EmbeddingModelV3, EmbeddingModelV2, EmbeddingModelV3Embedding, ImageModelV3, ImageModelV3CallWarning, ImageModelV3ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV3, LanguageModelV2, LanguageModelV3FinishReason, LanguageModelV3CallWarning, LanguageModelV3Source, LanguageModelV3Middleware, SharedV3ProviderMetadata, SpeechModelV3, SpeechModelV2, SpeechModelV3CallWarning, TranscriptionModelV3, TranscriptionModelV2, TranscriptionModelV3CallWarning, LanguageModelV3Usage, LanguageModelV3CallOptions, AISDKError, LanguageModelV3ToolCall, JSONSchema7, JSONParseError, TypeValidationError, ProviderV3, ProviderV2, NoSuchModelError, JSONObject } from '@ai-sdk/provider';
@@ -637,6 +637,61 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
637
637
  readonly experimental_output: OUTPUT;
638
638
  }
639
639
 
640
+ /**
641
+ Create a type from an object with all keys and nested keys set to optional.
642
+ The helper supports normal objects and schemas (which are resolved automatically).
643
+ It always recurses into arrays.
644
+
645
+ Adopted from [type-fest](https://github.com/sindresorhus/type-fest/tree/main) PartialDeep.
646
+ */
647
+ type DeepPartial<T> = T extends FlexibleSchema ? DeepPartialInternal<InferSchema<T>> : DeepPartialInternal<T>;
648
+ type DeepPartialInternal<T> = T extends null | undefined | string | number | boolean | symbol | bigint | void | Date | RegExp | ((...arguments_: any[]) => unknown) | (new (...arguments_: any[]) => unknown) ? T : T extends Map<infer KeyType, infer ValueType> ? PartialMap<KeyType, ValueType> : T extends Set<infer ItemType> ? PartialSet<ItemType> : T extends ReadonlyMap<infer KeyType, infer ValueType> ? PartialReadonlyMap<KeyType, ValueType> : T extends ReadonlySet<infer ItemType> ? PartialReadonlySet<ItemType> : T extends object ? T extends ReadonlyArray<infer ItemType> ? ItemType[] extends T ? readonly ItemType[] extends T ? ReadonlyArray<DeepPartialInternal<ItemType | undefined>> : Array<DeepPartialInternal<ItemType | undefined>> : PartialObject<T> : PartialObject<T> : unknown;
649
+ type PartialMap<KeyType, ValueType> = {} & Map<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
650
+ type PartialSet<T> = {} & Set<DeepPartialInternal<T>>;
651
+ type PartialReadonlyMap<KeyType, ValueType> = {} & ReadonlyMap<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
652
+ type PartialReadonlySet<T> = {} & ReadonlySet<DeepPartialInternal<T>>;
653
+ type PartialObject<ObjectType extends object> = {
654
+ [KeyType in keyof ObjectType]?: DeepPartialInternal<ObjectType[KeyType]>;
655
+ };
656
+
657
+ interface Output<OUTPUT = any, PARTIAL = any> {
658
+ readonly type: 'object' | 'text';
659
+ responseFormat: PromiseLike<LanguageModelV3CallOptions['responseFormat']>;
660
+ parsePartial(options: {
661
+ text: string;
662
+ }): Promise<{
663
+ partial: PARTIAL;
664
+ } | undefined>;
665
+ parseOutput(options: {
666
+ text: string;
667
+ }, context: {
668
+ response: LanguageModelResponseMetadata;
669
+ usage: LanguageModelUsage;
670
+ finishReason: FinishReason;
671
+ }): Promise<OUTPUT>;
672
+ }
673
+ declare const text: () => Output<string, string>;
674
+ declare const object: <OUTPUT>({ schema: inputSchema, }: {
675
+ schema: FlexibleSchema<OUTPUT>;
676
+ }) => Output<OUTPUT, DeepPartial<OUTPUT>>;
677
+ type InferGenerateOutput<OUTPUT extends Output> = OUTPUT extends Output<infer T, any> ? T : never;
678
+ type InferStreamOutput<OUTPUT extends Output> = OUTPUT extends Output<any, infer P> ? P : never;
679
+
680
+ type output_InferGenerateOutput<OUTPUT extends Output> = InferGenerateOutput<OUTPUT>;
681
+ type output_InferStreamOutput<OUTPUT extends Output> = InferStreamOutput<OUTPUT>;
682
+ type output_Output<OUTPUT = any, PARTIAL = any> = Output<OUTPUT, PARTIAL>;
683
+ declare const output_object: typeof object;
684
+ declare const output_text: typeof text;
685
+ declare namespace output {
686
+ export {
687
+ output_InferGenerateOutput as InferGenerateOutput,
688
+ output_InferStreamOutput as InferStreamOutput,
689
+ output_Output as Output,
690
+ output_object as object,
691
+ output_text as text,
692
+ };
693
+ }
694
+
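The `DeepPartial` helper and the `Output` interface move ahead of `CallSettings`, `Output` gains `any` defaults, and the namespace now also exposes `InferGenerateOutput` / `InferStreamOutput`. A minimal typing sketch of how these pieces fit together, assuming the `output` namespace is still exported publicly as `Output` (as in earlier releases) and that Zod schemas are accepted wherever `FlexibleSchema` is expected:

```ts
import { Output } from 'ai';
import { z } from 'zod';

// Output.object carries both the parsed output type and its DeepPartial
// streaming counterpart.
const recipeOutput = Output.object({
  schema: z.object({
    name: z.string(),
    ingredients: z.array(z.string()),
  }),
});

// Resolves to { name: string; ingredients: string[] }:
type Recipe = Output.InferGenerateOutput<typeof recipeOutput>;

// Resolves to the DeepPartial shape emitted while streaming:
type PartialRecipe = Output.InferStreamOutput<typeof recipeOutput>;
```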
640
695
  type CallSettings = {
641
696
  /**
642
697
  Maximum number of tokens to generate.
@@ -806,55 +861,6 @@ type DownloadFunction = (options: Array<{
806
861
  mediaType: string | undefined;
807
862
  } | null>>;
808
863
 
809
- /**
810
- Create a type from an object with all keys and nested keys set to optional.
811
- The helper supports normal objects and schemas (which are resolved automatically).
812
- It always recurses into arrays.
813
-
814
- Adopted from [type-fest](https://github.com/sindresorhus/type-fest/tree/main) PartialDeep.
815
- */
816
- type DeepPartial<T> = T extends FlexibleSchema ? DeepPartialInternal<InferSchema<T>> : DeepPartialInternal<T>;
817
- type DeepPartialInternal<T> = T extends null | undefined | string | number | boolean | symbol | bigint | void | Date | RegExp | ((...arguments_: any[]) => unknown) | (new (...arguments_: any[]) => unknown) ? T : T extends Map<infer KeyType, infer ValueType> ? PartialMap<KeyType, ValueType> : T extends Set<infer ItemType> ? PartialSet<ItemType> : T extends ReadonlyMap<infer KeyType, infer ValueType> ? PartialReadonlyMap<KeyType, ValueType> : T extends ReadonlySet<infer ItemType> ? PartialReadonlySet<ItemType> : T extends object ? T extends ReadonlyArray<infer ItemType> ? ItemType[] extends T ? readonly ItemType[] extends T ? ReadonlyArray<DeepPartialInternal<ItemType | undefined>> : Array<DeepPartialInternal<ItemType | undefined>> : PartialObject<T> : PartialObject<T> : unknown;
818
- type PartialMap<KeyType, ValueType> = {} & Map<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
819
- type PartialSet<T> = {} & Set<DeepPartialInternal<T>>;
820
- type PartialReadonlyMap<KeyType, ValueType> = {} & ReadonlyMap<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
821
- type PartialReadonlySet<T> = {} & ReadonlySet<DeepPartialInternal<T>>;
822
- type PartialObject<ObjectType extends object> = {
823
- [KeyType in keyof ObjectType]?: DeepPartialInternal<ObjectType[KeyType]>;
824
- };
825
-
826
- interface Output<OUTPUT, PARTIAL> {
827
- readonly type: 'object' | 'text';
828
- responseFormat: PromiseLike<LanguageModelV3CallOptions['responseFormat']>;
829
- parsePartial(options: {
830
- text: string;
831
- }): Promise<{
832
- partial: PARTIAL;
833
- } | undefined>;
834
- parseOutput(options: {
835
- text: string;
836
- }, context: {
837
- response: LanguageModelResponseMetadata;
838
- usage: LanguageModelUsage;
839
- finishReason: FinishReason;
840
- }): Promise<OUTPUT>;
841
- }
842
- declare const text: () => Output<string, string>;
843
- declare const object: <OUTPUT>({ schema: inputSchema, }: {
844
- schema: FlexibleSchema<OUTPUT>;
845
- }) => Output<OUTPUT, DeepPartial<OUTPUT>>;
846
-
847
- type output_Output<OUTPUT, PARTIAL> = Output<OUTPUT, PARTIAL>;
848
- declare const output_object: typeof object;
849
- declare const output_text: typeof text;
850
- declare namespace output {
851
- export {
852
- output_Output as Output,
853
- output_object as object,
854
- output_text as text,
855
- };
856
- }
857
-
858
864
  /**
859
865
  Function that you can use to provide different settings for a step.
860
866
 
@@ -2381,7 +2387,7 @@ type TextStreamPart<TOOLS extends ToolSet> = {
2381
2387
  * You can implement your own Agent by implementing the `Agent` interface,
2382
2388
  * or use the `ToolLoopAgent` class.
2383
2389
  */
2384
- interface Agent<TOOLS extends ToolSet = {}, OUTPUT = never, OUTPUT_PARTIAL = never> {
2390
+ interface Agent<TOOLS extends ToolSet = {}, OUTPUT extends Output = never> {
2385
2391
  /**
2386
2392
  * The specification version of the agent interface. This will enable
2387
2393
  * us to evolve the agent interface and retain backwards compatibility.
@@ -2398,11 +2404,11 @@ interface Agent<TOOLS extends ToolSet = {}, OUTPUT = never, OUTPUT_PARTIAL = nev
2398
2404
  /**
2399
2405
  * Generates an output from the agent (non-streaming).
2400
2406
  */
2401
- generate(options: Prompt): PromiseLike<GenerateTextResult<TOOLS, OUTPUT>>;
2407
+ generate(options: Prompt): PromiseLike<GenerateTextResult<TOOLS, InferGenerateOutput<OUTPUT>>>;
2402
2408
  /**
2403
2409
  * Streams an output from the agent (streaming).
2404
2410
  */
2405
- stream(options: Prompt): StreamTextResult<TOOLS, OUTPUT_PARTIAL>;
2411
+ stream(options: Prompt): StreamTextResult<TOOLS, InferStreamOutput<OUTPUT>>;
2406
2412
  }
2407
2413
 
2408
2414
  /**
@@ -2431,7 +2437,7 @@ type ToolLoopAgentOnStepFinishCallback<TOOLS extends ToolSet = {}> = (stepResult
2431
2437
  /**
2432
2438
  * Configuration options for an agent.
2433
2439
  */
2434
- type ToolLoopAgentSettings<TOOLS extends ToolSet = {}, OUTPUT = never, OUTPUT_PARTIAL = never> = CallSettings & {
2440
+ type ToolLoopAgentSettings<TOOLS extends ToolSet = {}, OUTPUT extends Output = never> = CallSettings & {
2435
2441
  /**
2436
2442
  * The id of the agent.
2437
2443
  */
@@ -2471,7 +2477,7 @@ type ToolLoopAgentSettings<TOOLS extends ToolSet = {}, OUTPUT = never, OUTPUT_PA
2471
2477
  /**
2472
2478
  Optional specification for parsing structured outputs from the LLM response.
2473
2479
  */
2474
- experimental_output?: Output<OUTPUT, OUTPUT_PARTIAL>;
2480
+ experimental_output?: OUTPUT;
2475
2481
  /**
2476
2482
  * @deprecated Use `prepareStep` instead.
2477
2483
  */
@@ -2519,10 +2525,10 @@ type ToolLoopAgentSettings<TOOLS extends ToolSet = {}, OUTPUT = never, OUTPUT_PA
2519
2525
  * - A tool call needs approval, or
2520
2526
  * - A stop condition is met (default stop condition is stepCountIs(20))
2521
2527
  */
2522
- declare class ToolLoopAgent<TOOLS extends ToolSet = {}, OUTPUT = never, OUTPUT_PARTIAL = never> implements Agent<TOOLS, OUTPUT, OUTPUT_PARTIAL> {
2528
+ declare class ToolLoopAgent<TOOLS extends ToolSet = {}, OUTPUT extends Output = never> implements Agent<TOOLS, OUTPUT> {
2523
2529
  readonly version = "agent-v1";
2524
2530
  private readonly settings;
2525
- constructor(settings: ToolLoopAgentSettings<TOOLS, OUTPUT, OUTPUT_PARTIAL>);
2531
+ constructor(settings: ToolLoopAgentSettings<TOOLS, OUTPUT>);
2526
2532
  /**
2527
2533
  * The id of the agent.
2528
2534
  */
@@ -2534,17 +2540,17 @@ declare class ToolLoopAgent<TOOLS extends ToolSet = {}, OUTPUT = never, OUTPUT_P
2534
2540
  /**
2535
2541
  * Generates an output from the agent (non-streaming).
2536
2542
  */
2537
- generate(options: Prompt): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
2543
+ generate(options: Prompt): Promise<GenerateTextResult<TOOLS, InferGenerateOutput<OUTPUT>>>;
2538
2544
  /**
2539
2545
  * Streams an output from the agent (streaming).
2540
2546
  */
2541
- stream(options: Prompt): StreamTextResult<TOOLS, OUTPUT_PARTIAL>;
2547
+ stream(options: Prompt): StreamTextResult<TOOLS, InferStreamOutput<OUTPUT>>;
2542
2548
  }
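With the agent generics collapsed from `<TOOLS, OUTPUT, OUTPUT_PARTIAL>` to `<TOOLS, OUTPUT extends Output>`, the streaming partial type is now derived via `InferStreamOutput` instead of being passed separately. A sketch of the resulting typing, assuming the class is exported as `ToolLoopAgent` and that `model` and the other settings outside this hunk behave as in previous betas:

```ts
import { Output, ToolLoopAgent } from 'ai';
import { openai } from '@ai-sdk/openai'; // arbitrary provider choice
import { z } from 'zod';

const agent = new ToolLoopAgent({
  model: openai('gpt-4o'), // `model` comes from settings outside this hunk
  experimental_output: Output.object({
    schema: z.object({ summary: z.string(), confidence: z.number() }),
  }),
});

// generate() is typed with InferGenerateOutput<OUTPUT>:
const result = await agent.generate({ prompt: 'Summarize the release.' });
console.log(result.experimental_output.summary); // string

// stream() is typed with InferStreamOutput<OUTPUT>, i.e. the DeepPartial shape:
const streamResult = agent.stream({ prompt: 'Summarize the release.' });
```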
2543
2549
 
2544
2550
  /**
2545
2551
  * Infer the type of the tools of an agent.
2546
2552
  */
2547
- type InferAgentTools<AGENT> = AGENT extends Agent<infer TOOLS, any, any> ? TOOLS : never;
2553
+ type InferAgentTools<AGENT> = AGENT extends Agent<infer TOOLS, never> ? TOOLS : never;
2548
2554
 
2549
2555
  /**
2550
2556
  * Infer the UI message type of an agent.
@@ -2552,1244 +2558,1241 @@ type InferAgentTools<AGENT> = AGENT extends Agent<infer TOOLS, any, any> ? TOOLS
2552
2558
  type InferAgentUIMessage<AGENT> = UIMessage<never, never, InferUITools<InferAgentTools<AGENT>>>;
2553
2559
 
2554
2560
  /**
2555
- * Runs the agent and stream the output as a UI message stream
2556
- * in the response body.
2561
+ * Runs the agent and returns a response object with a UI message stream.
2557
2562
  *
2558
2563
  * @param agent - The agent to run.
2559
2564
  * @param messages - The input UI messages.
2560
2565
  *
2561
2566
  * @returns The response object.
2562
2567
  */
2563
- declare function createAgentUIStreamResponse<TOOLS extends ToolSet = {}, OUTPUT = never, OUTPUT_PARTIAL = never>({ agent, messages, ...options }: {
2564
- agent: Agent<TOOLS, OUTPUT, OUTPUT_PARTIAL>;
2568
+ declare function createAgentUIStreamResponse<TOOLS extends ToolSet = {}, OUTPUT extends Output = never>({ headers, status, statusText, consumeSseStream, ...options }: {
2569
+ agent: Agent<TOOLS, OUTPUT>;
2565
2570
  messages: unknown[];
2566
- } & UIMessageStreamOptions<UIMessage<never, never, InferUITools<TOOLS>>>): Promise<Response>;
2571
+ } & UIMessageStreamResponseInit & UIMessageStreamOptions<UIMessage<never, never, InferUITools<TOOLS>>>): Promise<Response>;
2572
+
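`createAgentUIStreamResponse` now also accepts `UIMessageStreamResponseInit` fields (`headers`, `status`, `statusText`, `consumeSseStream`) alongside the agent and messages. A hedged route-handler sketch, assuming the function is exported as declared (the `POST` handler shape and the `./agent` module are illustrative, not part of this diff):

```ts
import { createAgentUIStreamResponse } from 'ai';
import { agent } from './agent'; // hypothetical module exporting a ToolLoopAgent

export async function POST(request: Request): Promise<Response> {
  const { messages } = await request.json();

  return createAgentUIStreamResponse({
    agent,
    messages,
    // New in this signature: response init fields are passed straight through.
    headers: { 'x-chat-backend': 'ai-6.0.0-beta.63' },
    status: 200,
  });
}
```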
2573
+ declare const getOriginalFetch: () => typeof fetch;
2574
+ declare function callCompletionApi({ api, prompt, credentials, headers, body, streamProtocol, setCompletion, setLoading, setError, setAbortController, onFinish, onError, fetch, }: {
2575
+ api: string;
2576
+ prompt: string;
2577
+ credentials: RequestCredentials | undefined;
2578
+ headers: HeadersInit | undefined;
2579
+ body: Record<string, any>;
2580
+ streamProtocol: 'data' | 'text' | undefined;
2581
+ setCompletion: (completion: string) => void;
2582
+ setLoading: (loading: boolean) => void;
2583
+ setError: (error: Error | undefined) => void;
2584
+ setAbortController: (abortController: AbortController | null) => void;
2585
+ onFinish: ((prompt: string, completion: string) => void) | undefined;
2586
+ onError: ((error: Error) => void) | undefined;
2587
+ fetch: ReturnType<typeof getOriginalFetch> | undefined;
2588
+ }): Promise<string | null | undefined>;
2567
2589
 
2568
2590
  /**
2569
- The result of an `embed` call.
2570
- It contains the embedding, the value, and additional information.
2591
+ * Transport interface for handling chat message communication and streaming.
2592
+ *
2593
+ * The `ChatTransport` interface provides fine-grained control over how messages
2594
+ * are sent to API endpoints and how responses are processed. This enables
2595
+ * alternative communication protocols like WebSockets, custom authentication
2596
+ * patterns, or specialized backend integrations.
2597
+ *
2598
+ * @template UI_MESSAGE - The UI message type extending UIMessage
2571
2599
  */
2572
- interface EmbedResult<VALUE> {
2573
- /**
2574
- The value that was embedded.
2575
- */
2576
- readonly value: VALUE;
2577
- /**
2578
- The embedding of the value.
2579
- */
2580
- readonly embedding: Embedding;
2581
- /**
2582
- The embedding token usage.
2583
- */
2584
- readonly usage: EmbeddingModelUsage;
2600
+ interface ChatTransport<UI_MESSAGE extends UIMessage> {
2585
2601
  /**
2586
- Optional provider-specific metadata.
2587
- */
2588
- readonly providerMetadata?: ProviderMetadata;
2602
+ * Sends messages to the chat API endpoint and returns a streaming response.
2603
+ *
2604
+ * This method handles both new message submission and message regeneration.
2605
+ * It supports real-time streaming of responses through UIMessageChunk events.
2606
+ *
2607
+ * @param options - Configuration object containing:
2608
+ * @param options.trigger - The type of message submission:
2609
+ * - `'submit-message'`: Submitting a new user message
2610
+ * - `'regenerate-message'`: Regenerating an assistant response
2611
+ * @param options.chatId - Unique identifier for the chat session
2612
+ * @param options.messageId - ID of the message to regenerate (for regenerate-message trigger) or undefined for new messages
2613
+ * @param options.messages - Array of UI messages representing the conversation history
2614
+ * @param options.abortSignal - Signal to abort the request if needed
2615
+ * @param options.headers - Additional HTTP headers to include in the request
2616
+ * @param options.body - Additional JSON properties to include in the request body
2617
+ * @param options.metadata - Custom metadata to attach to the request
2618
+ *
2619
+ * @returns Promise resolving to a ReadableStream of UIMessageChunk objects.
2620
+ * The stream emits various chunk types like:
2621
+ * - `text-start`, `text-delta`, `text-end`: For streaming text content
2622
+ * - `tool-input-start`, `tool-input-delta`, `tool-input-available`: For tool calls
2623
+ * - `data-part-start`, `data-part-delta`, `data-part-available`: For data parts
2624
+ * - `error`: For error handling
2625
+ *
2626
+ * @throws Error when the API request fails or response is invalid
2627
+ */
2628
+ sendMessages: (options: {
2629
+ /** The type of message submission - either new message or regeneration */
2630
+ trigger: 'submit-message' | 'regenerate-message';
2631
+ /** Unique identifier for the chat session */
2632
+ chatId: string;
2633
+ /** ID of the message to regenerate, or undefined for new messages */
2634
+ messageId: string | undefined;
2635
+ /** Array of UI messages representing the conversation history */
2636
+ messages: UI_MESSAGE[];
2637
+ /** Signal to abort the request if needed */
2638
+ abortSignal: AbortSignal | undefined;
2639
+ } & ChatRequestOptions) => Promise<ReadableStream<UIMessageChunk>>;
2589
2640
  /**
2590
- Optional response data.
2591
- */
2592
- readonly response?: {
2593
- /**
2594
- Response headers.
2595
- */
2596
- headers?: Record<string, string>;
2597
- /**
2598
- The response body.
2599
- */
2600
- body?: unknown;
2601
- };
2641
+ * Reconnects to an existing streaming response for the specified chat session.
2642
+ *
2643
+ * This method is used to resume streaming when a connection is interrupted
2644
+ * or when resuming a chat session. It's particularly useful for maintaining
2645
+ * continuity in long-running conversations or recovering from network issues.
2646
+ *
2647
+ * @param options - Configuration object containing:
2648
+ * @param options.chatId - Unique identifier for the chat session to reconnect to
2649
+ * @param options.headers - Additional HTTP headers to include in the reconnection request
2650
+ * @param options.body - Additional JSON properties to include in the request body
2651
+ * @param options.metadata - Custom metadata to attach to the request
2652
+ *
2653
+ * @returns Promise resolving to:
2654
+ * - `ReadableStream<UIMessageChunk>`: If an active stream is found and can be resumed
2655
+ * - `null`: If no active stream exists for the specified chat session (e.g., response already completed)
2656
+ *
2657
+ * @throws Error when the reconnection request fails or response is invalid
2658
+ */
2659
+ reconnectToStream: (options: {
2660
+ /** Unique identifier for the chat session to reconnect to */
2661
+ chatId: string;
2662
+ } & ChatRequestOptions) => Promise<ReadableStream<UIMessageChunk> | null>;
2602
2663
  }
2603
2664
 
2665
+ type CreateUIMessage<UI_MESSAGE extends UIMessage> = Omit<UI_MESSAGE, 'id' | 'role'> & {
2666
+ id?: UI_MESSAGE['id'];
2667
+ role?: UI_MESSAGE['role'];
2668
+ };
2669
+ type UIDataPartSchemas = Record<string, FlexibleSchema>;
2670
+ type UIDataTypesToSchemas<T extends UIDataTypes> = {
2671
+ [K in keyof T]: FlexibleSchema<T[K]>;
2672
+ };
2673
+ type InferUIDataParts<T extends UIDataPartSchemas> = {
2674
+ [K in keyof T]: InferSchema<T[K]>;
2675
+ };
2676
+ type ChatRequestOptions = {
2677
+ /**
2678
+ Additional headers that should be to be passed to the API endpoint.
2679
+ */
2680
+ headers?: Record<string, string> | Headers;
2681
+ /**
2682
+ Additional body JSON properties that should be sent to the API endpoint.
2683
+ */
2684
+ body?: object;
2685
+ metadata?: unknown;
2686
+ };
2604
2687
  /**
2605
- Embed a value using an embedding model. The type of the value is defined by the embedding model.
2606
-
2607
- @param model - The embedding model to use.
2608
- @param value - The value that should be embedded.
2609
-
2610
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
2611
- @param abortSignal - An optional abort signal that can be used to cancel the call.
2612
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
2613
-
2614
- @returns A result object that contains the embedding, the value, and additional information.
2688
+ * Function that can be called to add a tool approval response to the chat.
2615
2689
  */
2616
- declare function embed<VALUE = string>({ model: modelArg, value, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
2617
- /**
2618
- The embedding model to use.
2619
- */
2620
- model: EmbeddingModel<VALUE>;
2690
+ type ChatAddToolApproveResponseFunction = ({ id, approved, reason, }: {
2691
+ id: string;
2621
2692
  /**
2622
- The value that should be embedded.
2693
+ * Flag indicating whether the approval was granted or denied.
2623
2694
  */
2624
- value: VALUE;
2695
+ approved: boolean;
2625
2696
  /**
2626
- Maximum number of retries per embedding model call. Set to 0 to disable retries.
2627
-
2628
- @default 2
2629
- */
2630
- maxRetries?: number;
2631
- /**
2632
- Abort signal.
2633
- */
2634
- abortSignal?: AbortSignal;
2635
- /**
2636
- Additional headers to include in the request.
2637
- Only applicable for HTTP-based providers.
2638
- */
2639
- headers?: Record<string, string>;
2640
- /**
2641
- Additional provider-specific options. They are passed through
2642
- to the provider from the AI SDK and enable provider-specific
2643
- functionality that can be fully encapsulated in the provider.
2644
- */
2645
- providerOptions?: ProviderOptions;
2646
- /**
2647
- * Optional telemetry configuration (experimental).
2697
+ * Optional reason for the approval or denial.
2648
2698
  */
2649
- experimental_telemetry?: TelemetrySettings;
2650
- }): Promise<EmbedResult<VALUE>>;
2651
-
2699
+ reason?: string;
2700
+ }) => void | PromiseLike<void>;
2701
+ type ChatStatus = 'submitted' | 'streaming' | 'ready' | 'error';
2702
+ interface ChatState<UI_MESSAGE extends UIMessage> {
2703
+ status: ChatStatus;
2704
+ error: Error | undefined;
2705
+ messages: UI_MESSAGE[];
2706
+ pushMessage: (message: UI_MESSAGE) => void;
2707
+ popMessage: () => void;
2708
+ replaceMessage: (index: number, message: UI_MESSAGE) => void;
2709
+ snapshot: <T>(thing: T) => T;
2710
+ }
2711
+ type ChatOnErrorCallback = (error: Error) => void;
2712
+ type ChatOnToolCallCallback<UI_MESSAGE extends UIMessage = UIMessage> = (options: {
2713
+ toolCall: InferUIMessageToolCall<UI_MESSAGE>;
2714
+ }) => void | PromiseLike<void>;
2715
+ type ChatOnDataCallback<UI_MESSAGE extends UIMessage> = (dataPart: DataUIPart<InferUIMessageData<UI_MESSAGE>>) => void;
2652
2716
  /**
2653
- The result of a `embedMany` call.
2654
- It contains the embeddings, the values, and additional information.
2717
+ * Function that is called when the assistant response has finished streaming.
2718
+ *
2719
+ * @param message The assistant message that was streamed.
2720
+ * @param messages The full chat history, including the assistant message.
2721
+ *
2722
+ * @param isAbort Indicates whether the request has been aborted.
2723
+ * @param isDisconnect Indicates whether the request has been ended by a network error.
2724
+ * @param isError Indicates whether the request has been ended by an error.
2655
2725
  */
2656
- interface EmbedManyResult<VALUE> {
2726
+ type ChatOnFinishCallback<UI_MESSAGE extends UIMessage> = (options: {
2727
+ message: UI_MESSAGE;
2728
+ messages: UI_MESSAGE[];
2729
+ isAbort: boolean;
2730
+ isDisconnect: boolean;
2731
+ isError: boolean;
2732
+ }) => void;
2733
+ interface ChatInit<UI_MESSAGE extends UIMessage> {
2657
2734
  /**
2658
- The values that were embedded.
2659
- */
2660
- readonly values: Array<VALUE>;
2735
+ * A unique identifier for the chat. If not provided, a random one will be
2736
+ * generated.
2737
+ */
2738
+ id?: string;
2739
+ messageMetadataSchema?: FlexibleSchema<InferUIMessageMetadata<UI_MESSAGE>>;
2740
+ dataPartSchemas?: UIDataTypesToSchemas<InferUIMessageData<UI_MESSAGE>>;
2741
+ messages?: UI_MESSAGE[];
2661
2742
  /**
2662
- The embeddings. They are in the same order as the values.
2663
- */
2664
- readonly embeddings: Array<Embedding>;
2743
+ * A way to provide a function that is going to be used for ids for messages and the chat.
2744
+ * If not provided the default AI SDK `generateId` is used.
2745
+ */
2746
+ generateId?: IdGenerator;
2747
+ transport?: ChatTransport<UI_MESSAGE>;
2665
2748
  /**
2666
- The embedding token usage.
2667
- */
2668
- readonly usage: EmbeddingModelUsage;
2749
+ * Callback function to be called when an error is encountered.
2750
+ */
2751
+ onError?: ChatOnErrorCallback;
2669
2752
  /**
2670
- Optional provider-specific metadata.
2753
+ Optional callback function that is invoked when a tool call is received.
2754
+ Intended for automatic client-side tool execution.
2755
+
2756
+ You can optionally return a result for the tool call,
2757
+ either synchronously or asynchronously.
2671
2758
  */
2672
- readonly providerMetadata?: ProviderMetadata;
2759
+ onToolCall?: ChatOnToolCallCallback<UI_MESSAGE>;
2673
2760
  /**
2674
- Optional raw response data.
2675
- */
2676
- readonly responses?: Array<{
2677
- /**
2678
- Response headers.
2679
- */
2680
- headers?: Record<string, string>;
2681
- /**
2682
- The response body.
2683
- */
2684
- body?: unknown;
2685
- } | undefined>;
2686
- }
2687
-
2688
- /**
2689
- Embed several values using an embedding model. The type of the value is defined
2690
- by the embedding model.
2691
-
2692
- `embedMany` automatically splits large requests into smaller chunks if the model
2693
- has a limit on how many embeddings can be generated in a single call.
2694
-
2695
- @param model - The embedding model to use.
2696
- @param values - The values that should be embedded.
2697
-
2698
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
2699
- @param abortSignal - An optional abort signal that can be used to cancel the call.
2700
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
2701
-
2702
- @returns A result object that contains the embeddings, the value, and additional information.
2703
- */
2704
- declare function embedMany<VALUE = string>({ model: modelArg, values, maxParallelCalls, maxRetries: maxRetriesArg, abortSignal, headers, providerOptions, experimental_telemetry: telemetry, }: {
2761
+ * Function that is called when the assistant response has finished streaming.
2762
+ */
2763
+ onFinish?: ChatOnFinishCallback<UI_MESSAGE>;
2705
2764
  /**
2706
- The embedding model to use.
2707
- */
2708
- model: EmbeddingModel<VALUE>;
2765
+ * Optional callback function that is called when a data part is received.
2766
+ *
2767
+ * @param data The data part that was received.
2768
+ */
2769
+ onData?: ChatOnDataCallback<UI_MESSAGE>;
2709
2770
  /**
2710
- The values that should be embedded.
2771
+ * When provided, this function will be called when the stream is finished or a tool call is added
2772
+ * to determine if the current messages should be resubmitted.
2711
2773
  */
2712
- values: Array<VALUE>;
2774
+ sendAutomaticallyWhen?: (options: {
2775
+ messages: UI_MESSAGE[];
2776
+ }) => boolean | PromiseLike<boolean>;
2777
+ }
2778
+ declare abstract class AbstractChat<UI_MESSAGE extends UIMessage> {
2779
+ readonly id: string;
2780
+ readonly generateId: IdGenerator;
2781
+ protected state: ChatState<UI_MESSAGE>;
2782
+ private messageMetadataSchema;
2783
+ private dataPartSchemas;
2784
+ private readonly transport;
2785
+ private onError?;
2786
+ private onToolCall?;
2787
+ private onFinish?;
2788
+ private onData?;
2789
+ private sendAutomaticallyWhen?;
2790
+ private activeResponse;
2791
+ private jobExecutor;
2792
+ constructor({ generateId, id, transport, messageMetadataSchema, dataPartSchemas, state, onError, onToolCall, onFinish, onData, sendAutomaticallyWhen, }: Omit<ChatInit<UI_MESSAGE>, 'messages'> & {
2793
+ state: ChatState<UI_MESSAGE>;
2794
+ });
2713
2795
  /**
2714
- Maximum number of retries per embedding model call. Set to 0 to disable retries.
2715
-
2716
- @default 2
2796
+ * Hook status:
2797
+ *
2798
+ * - `submitted`: The message has been sent to the API and we're awaiting the start of the response stream.
2799
+ * - `streaming`: The response is actively streaming in from the API, receiving chunks of data.
2800
+ * - `ready`: The full response has been received and processed; a new user message can be submitted.
2801
+ * - `error`: An error occurred during the API request, preventing successful completion.
2717
2802
  */
2718
- maxRetries?: number;
2803
+ get status(): ChatStatus;
2804
+ protected setStatus({ status, error, }: {
2805
+ status: ChatStatus;
2806
+ error?: Error;
2807
+ }): void;
2808
+ get error(): Error | undefined;
2809
+ get messages(): UI_MESSAGE[];
2810
+ get lastMessage(): UI_MESSAGE | undefined;
2811
+ set messages(messages: UI_MESSAGE[]);
2719
2812
  /**
2720
- Abort signal.
2721
- */
2722
- abortSignal?: AbortSignal;
2813
+ * Appends or replaces a user message to the chat list. This triggers the API call to fetch
2814
+ * the assistant's response.
2815
+ *
2816
+ * If a messageId is provided, the message will be replaced.
2817
+ */
2818
+ sendMessage: (message?: (CreateUIMessage<UI_MESSAGE> & {
2819
+ text?: never;
2820
+ files?: never;
2821
+ messageId?: string;
2822
+ }) | {
2823
+ text: string;
2824
+ files?: FileList | FileUIPart[];
2825
+ metadata?: InferUIMessageMetadata<UI_MESSAGE>;
2826
+ parts?: never;
2827
+ messageId?: string;
2828
+ } | {
2829
+ files: FileList | FileUIPart[];
2830
+ metadata?: InferUIMessageMetadata<UI_MESSAGE>;
2831
+ parts?: never;
2832
+ messageId?: string;
2833
+ }, options?: ChatRequestOptions) => Promise<void>;
2723
2834
  /**
2724
- Additional headers to include in the request.
2725
- Only applicable for HTTP-based providers.
2726
- */
2727
- headers?: Record<string, string>;
2835
+ * Regenerate the assistant message with the provided message id.
2836
+ * If no message id is provided, the last assistant message will be regenerated.
2837
+ */
2838
+ regenerate: ({ messageId, ...options }?: {
2839
+ messageId?: string;
2840
+ } & ChatRequestOptions) => Promise<void>;
2728
2841
  /**
2729
- * Optional telemetry configuration (experimental).
2842
+ * Attempt to resume an ongoing streaming response.
2730
2843
  */
2731
- experimental_telemetry?: TelemetrySettings;
2844
+ resumeStream: (options?: ChatRequestOptions) => Promise<void>;
2732
2845
  /**
2733
- Additional provider-specific options. They are passed through
2734
- to the provider from the AI SDK and enable provider-specific
2735
- functionality that can be fully encapsulated in the provider.
2736
- */
2737
- providerOptions?: ProviderOptions;
2846
+ * Clear the error state and set the status to ready if the chat is in an error state.
2847
+ */
2848
+ clearError: () => void;
2849
+ addToolApprovalResponse: ChatAddToolApproveResponseFunction;
2850
+ addToolResult: <TOOL extends keyof InferUIMessageTools<UI_MESSAGE>>({ state, tool, toolCallId, output, errorText, }: {
2851
+ state?: "output-available";
2852
+ tool: TOOL;
2853
+ toolCallId: string;
2854
+ output: InferUIMessageTools<UI_MESSAGE>[TOOL]["output"];
2855
+ errorText?: never;
2856
+ } | {
2857
+ state: "output-error";
2858
+ tool: TOOL;
2859
+ toolCallId: string;
2860
+ output?: never;
2861
+ errorText: string;
2862
+ }) => Promise<void>;
2738
2863
  /**
2739
- * Maximum number of concurrent requests.
2740
- *
2741
- * @default Infinity
2864
+ * Abort the current request immediately, keep the generated tokens if any.
2742
2865
  */
2743
- maxParallelCalls?: number;
2744
- }): Promise<EmbedManyResult<VALUE>>;
2866
+ stop: () => Promise<void>;
2867
+ private makeRequest;
2868
+ }
2745
2869
 
2746
- declare const symbol$d: unique symbol;
2747
- declare class InvalidArgumentError extends AISDKError {
2748
- private readonly [symbol$d];
2749
- readonly parameter: string;
2750
- readonly value: unknown;
2751
- constructor({ parameter, value, message, }: {
2752
- parameter: string;
2753
- value: unknown;
2754
- message: string;
2755
- });
2756
- static isInstance(error: unknown): error is InvalidArgumentError;
2757
- }
2870
+ declare function convertFileListToFileUIParts(files: FileList | undefined): Promise<Array<FileUIPart>>;
2758
2871
 
2759
- type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
2760
- type: 'text-start';
2761
- providerMetadata?: ProviderMetadata;
2762
- id: string;
2763
- } | {
2764
- type: 'text-delta';
2765
- id: string;
2766
- providerMetadata?: ProviderMetadata;
2767
- delta: string;
2768
- } | {
2769
- type: 'text-end';
2770
- providerMetadata?: ProviderMetadata;
2771
- id: string;
2772
- } | {
2773
- type: 'reasoning-start';
2774
- providerMetadata?: ProviderMetadata;
2775
- id: string;
2776
- } | {
2777
- type: 'reasoning-delta';
2778
- id: string;
2779
- providerMetadata?: ProviderMetadata;
2780
- delta: string;
2781
- } | {
2782
- type: 'reasoning-end';
2783
- id: string;
2784
- providerMetadata?: ProviderMetadata;
2785
- } | {
2786
- type: 'tool-input-start';
2787
- id: string;
2788
- toolName: string;
2789
- providerMetadata?: ProviderMetadata;
2790
- dynamic?: boolean;
2791
- } | {
2792
- type: 'tool-input-delta';
2793
- id: string;
2794
- delta: string;
2795
- providerMetadata?: ProviderMetadata;
2796
- } | {
2797
- type: 'tool-input-end';
2798
- id: string;
2799
- providerMetadata?: ProviderMetadata;
2800
- } | ToolApprovalRequestOutput<TOOLS> | ({
2801
- type: 'source';
2802
- } & Source) | {
2803
- type: 'file';
2804
- file: GeneratedFile;
2805
- } | ({
2806
- type: 'tool-call';
2807
- } & TypedToolCall<TOOLS>) | ({
2808
- type: 'tool-result';
2809
- } & TypedToolResult<TOOLS>) | ({
2810
- type: 'tool-error';
2811
- } & TypedToolError<TOOLS>) | {
2812
- type: 'file';
2813
- file: GeneratedFile;
2814
- } | {
2815
- type: 'stream-start';
2816
- warnings: LanguageModelV3CallWarning[];
2817
- } | {
2818
- type: 'response-metadata';
2819
- id?: string;
2820
- timestamp?: Date;
2821
- modelId?: string;
2822
- } | {
2823
- type: 'finish';
2824
- finishReason: FinishReason;
2825
- usage: LanguageModelUsage;
2826
- providerMetadata?: ProviderMetadata;
2827
- } | {
2828
- type: 'error';
2829
- error: unknown;
2830
- } | {
2831
- type: 'raw';
2832
- rawValue: unknown;
2833
- };
2872
+ /**
2873
+ Converts an array of UI messages from useChat into an array of ModelMessages that can be used
2874
+ with the AI functions (e.g. `streamText`, `generateText`).
2834
2875
 
2835
- declare const symbol$c: unique symbol;
2836
- declare class InvalidStreamPartError extends AISDKError {
2837
- private readonly [symbol$c];
2838
- readonly chunk: SingleRequestTextStreamPart<any>;
2839
- constructor({ chunk, message, }: {
2840
- chunk: SingleRequestTextStreamPart<any>;
2841
- message: string;
2842
- });
2843
- static isInstance(error: unknown): error is InvalidStreamPartError;
2844
- }
2876
+ @param messages - The UI messages to convert.
2877
+ @param options.tools - The tools to use.
2878
+ @param options.ignoreIncompleteToolCalls - Whether to ignore incomplete tool calls. Default is `false`.
2845
2879
 
2846
- declare const symbol$b: unique symbol;
2880
+ @returns An array of ModelMessages.
2881
+ */
2882
+ declare function convertToModelMessages(messages: Array<Omit<UIMessage, 'id'>>, options?: {
2883
+ tools?: ToolSet;
2884
+ ignoreIncompleteToolCalls?: boolean;
2885
+ }): ModelMessage[];
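A small usage sketch of the declared `convertToModelMessages` signature, converting persisted UI messages before a model call (the `@ai-sdk/openai` provider is an arbitrary illustrative choice):

```ts
import { convertToModelMessages, streamText, type ToolSet, type UIMessage } from 'ai';
import { openai } from '@ai-sdk/openai'; // arbitrary provider choice

function continueConversation(uiMessages: UIMessage[], tools: ToolSet) {
  const modelMessages = convertToModelMessages(uiMessages, {
    tools,
    // Drop tool calls that never received a result instead of failing on them:
    ignoreIncompleteToolCalls: true,
  });

  return streamText({ model: openai('gpt-4o'), messages: modelMessages });
}
```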
2847
2886
  /**
2848
- * An error occurred with the MCP client.
2887
+ @deprecated Use `convertToModelMessages` instead.
2849
2888
  */
2850
- declare class MCPClientError extends AISDKError {
2851
- private readonly [symbol$b];
2852
- readonly data?: unknown;
2853
- readonly code?: number;
2854
- constructor({ name, message, cause, data, code, }: {
2855
- name?: string;
2856
- message: string;
2857
- cause?: unknown;
2858
- data?: unknown;
2859
- code?: number;
2860
- });
2861
- static isInstance(error: unknown): error is MCPClientError;
2862
- }
2889
+ declare const convertToCoreMessages: typeof convertToModelMessages;
2863
2890
 
2864
- declare const symbol$a: unique symbol;
2891
+ type PrepareSendMessagesRequest<UI_MESSAGE extends UIMessage> = (options: {
2892
+ id: string;
2893
+ messages: UI_MESSAGE[];
2894
+ requestMetadata: unknown;
2895
+ body: Record<string, any> | undefined;
2896
+ credentials: RequestCredentials | undefined;
2897
+ headers: HeadersInit | undefined;
2898
+ api: string;
2899
+ } & {
2900
+ trigger: 'submit-message' | 'regenerate-message';
2901
+ messageId: string | undefined;
2902
+ }) => {
2903
+ body: object;
2904
+ headers?: HeadersInit;
2905
+ credentials?: RequestCredentials;
2906
+ api?: string;
2907
+ } | PromiseLike<{
2908
+ body: object;
2909
+ headers?: HeadersInit;
2910
+ credentials?: RequestCredentials;
2911
+ api?: string;
2912
+ }>;
2913
+ type PrepareReconnectToStreamRequest = (options: {
2914
+ id: string;
2915
+ requestMetadata: unknown;
2916
+ body: Record<string, any> | undefined;
2917
+ credentials: RequestCredentials | undefined;
2918
+ headers: HeadersInit | undefined;
2919
+ api: string;
2920
+ }) => {
2921
+ headers?: HeadersInit;
2922
+ credentials?: RequestCredentials;
2923
+ api?: string;
2924
+ } | PromiseLike<{
2925
+ headers?: HeadersInit;
2926
+ credentials?: RequestCredentials;
2927
+ api?: string;
2928
+ }>;
2865
2929
  /**
2866
- Thrown when no image could be generated. This can have multiple causes:
2867
-
2868
- - The model failed to generate a response.
2869
- - The model generated a response that could not be parsed.
2930
+ * Options for the `HttpChatTransport` class.
2931
+ *
2932
+ * @param UI_MESSAGE - The type of message to be used in the chat.
2870
2933
  */
2871
- declare class NoImageGeneratedError extends AISDKError {
2872
- private readonly [symbol$a];
2934
+ type HttpChatTransportInitOptions<UI_MESSAGE extends UIMessage> = {
2873
2935
  /**
2874
- The response metadata for each call.
2936
+ * The API URL to be used for the chat transport.
2937
+ * Defaults to '/api/chat'.
2875
2938
  */
2876
- readonly responses: Array<ImageModelResponseMetadata> | undefined;
2877
- constructor({ message, cause, responses, }: {
2878
- message?: string;
2879
- cause?: Error;
2880
- responses?: Array<ImageModelResponseMetadata>;
2881
- });
2882
- static isInstance(error: unknown): error is NoImageGeneratedError;
2883
- }
2884
-
2885
- declare const symbol$9: unique symbol;
2886
- /**
2887
- Thrown when no object could be generated. This can have several causes:
2888
-
2889
- - The model failed to generate a response.
2890
- - The model generated a response that could not be parsed.
2891
- - The model generated a response that could not be validated against the schema.
2892
-
2893
- The error contains the following properties:
2894
-
2895
- - `text`: The text that was generated by the model. This can be the raw text or the tool call text, depending on the model.
2896
- */
2897
- declare class NoObjectGeneratedError extends AISDKError {
2898
- private readonly [symbol$9];
2939
+ api?: string;
2899
2940
  /**
2900
- The text that was generated by the model. This can be the raw text or the tool call text, depending on the model.
2941
+ * The credentials mode to be used for the fetch request.
2942
+ * Possible values are: 'omit', 'same-origin', 'include'.
2943
+ * Defaults to 'same-origin'.
2901
2944
  */
2902
- readonly text: string | undefined;
2945
+ credentials?: Resolvable<RequestCredentials>;
2903
2946
  /**
2904
- The response metadata.
2947
+ * HTTP headers to be sent with the API request.
2905
2948
  */
2906
- readonly response: LanguageModelResponseMetadata | undefined;
2949
+ headers?: Resolvable<Record<string, string> | Headers>;
2907
2950
  /**
2908
- The usage of the model.
2951
+ * Extra body object to be sent with the API request.
2952
+ * @example
2953
+ * Send a `sessionId` to the API along with the messages.
2954
+ * ```js
2955
+ * useChat({
2956
+ * body: {
2957
+ * sessionId: '123',
2958
+ * }
2959
+ * })
2960
+ * ```
2909
2961
  */
2910
- readonly usage: LanguageModelUsage | undefined;
2962
+ body?: Resolvable<object>;
2911
2963
  /**
2912
- Reason why the model finished generating a response.
2964
+ Custom fetch implementation. You can use it as a middleware to intercept requests,
2965
+ or to provide a custom fetch implementation for e.g. testing.
2966
+ */
2967
+ fetch?: FetchFunction;
2968
+ /**
2969
+ * When a function is provided, it will be used
2970
+ * to prepare the request body for the chat API. This can be useful for
2971
+ * customizing the request body based on the messages and data in the chat.
2972
+ *
2973
+ * @param id The id of the chat.
2974
+ * @param messages The current messages in the chat.
2975
+ * @param requestBody The request body object passed in the chat request.
2913
2976
  */
2914
- readonly finishReason: FinishReason | undefined;
2915
- constructor({ message, cause, text, response, usage, finishReason, }: {
2916
- message?: string;
2917
- cause?: Error;
2918
- text?: string;
2919
- response: LanguageModelResponseMetadata;
2920
- usage: LanguageModelUsage;
2921
- finishReason: FinishReason;
2922
- });
2923
- static isInstance(error: unknown): error is NoObjectGeneratedError;
2924
- }
2925
-
2926
- declare const symbol$8: unique symbol;
2927
- /**
2928
- Thrown when no LLM output was generated, e.g. because of errors.
2929
- */
2930
- declare class NoOutputGeneratedError extends AISDKError {
2931
- private readonly [symbol$8];
2932
- constructor({ message, cause, }?: {
2933
- message?: string;
2934
- cause?: Error;
2935
- });
2936
- static isInstance(error: unknown): error is NoOutputGeneratedError;
2937
- }
2938
-
2939
- declare const symbol$7: unique symbol;
2940
- /**
2941
- Thrown when no output type is specified and output-related methods are called.
2942
- */
2943
- declare class NoOutputSpecifiedError extends AISDKError {
2944
- private readonly [symbol$7];
2945
- constructor({ message }?: {
2946
- message?: string;
2947
- });
2948
- static isInstance(error: unknown): error is NoOutputSpecifiedError;
2949
- }
2950
-
2951
- /**
2952
- Error that is thrown when no speech audio was generated.
2953
- */
2954
- declare class NoSpeechGeneratedError extends AISDKError {
2955
- readonly responses: Array<SpeechModelResponseMetadata>;
2956
- constructor(options: {
2957
- responses: Array<SpeechModelResponseMetadata>;
2958
- });
2977
+ prepareSendMessagesRequest?: PrepareSendMessagesRequest<UI_MESSAGE>;
2978
+ /**
2979
+ * When a function is provided, it will be used
2980
+ * to prepare the request body for the chat API. This can be useful for
2981
+ * customizing the request body based on the messages and data in the chat.
2982
+ *
2983
+ * @param id The id of the chat.
2984
+ * @param messages The current messages in the chat.
2985
+ * @param requestBody The request body object passed in the chat request.
2986
+ */
2987
+ prepareReconnectToStreamRequest?: PrepareReconnectToStreamRequest;
2988
+ };
2989
+ declare abstract class HttpChatTransport<UI_MESSAGE extends UIMessage> implements ChatTransport<UI_MESSAGE> {
2990
+ protected api: string;
2991
+ protected credentials: HttpChatTransportInitOptions<UI_MESSAGE>['credentials'];
2992
+ protected headers: HttpChatTransportInitOptions<UI_MESSAGE>['headers'];
2993
+ protected body: HttpChatTransportInitOptions<UI_MESSAGE>['body'];
2994
+ protected fetch?: FetchFunction;
2995
+ protected prepareSendMessagesRequest?: PrepareSendMessagesRequest<UI_MESSAGE>;
2996
+ protected prepareReconnectToStreamRequest?: PrepareReconnectToStreamRequest;
2997
+ constructor({ api, credentials, headers, body, fetch, prepareSendMessagesRequest, prepareReconnectToStreamRequest, }: HttpChatTransportInitOptions<UI_MESSAGE>);
2998
+ sendMessages({ abortSignal, ...options }: Parameters<ChatTransport<UI_MESSAGE>['sendMessages']>[0]): Promise<ReadableStream<UIMessageChunk>>;
2999
+ reconnectToStream(options: Parameters<ChatTransport<UI_MESSAGE>['reconnectToStream']>[0]): Promise<ReadableStream<UIMessageChunk> | null>;
3000
+ protected abstract processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
2959
3001
  }
2960
3002
 
2961
- declare const symbol$6: unique symbol;
2962
- declare class ToolCallRepairError extends AISDKError {
2963
- private readonly [symbol$6];
2964
- readonly originalError: NoSuchToolError | InvalidToolInputError;
2965
- constructor({ cause, originalError, message, }: {
2966
- message?: string;
2967
- cause: unknown;
2968
- originalError: NoSuchToolError | InvalidToolInputError;
2969
- });
2970
- static isInstance(error: unknown): error is ToolCallRepairError;
3003
+ declare class DefaultChatTransport<UI_MESSAGE extends UIMessage> extends HttpChatTransport<UI_MESSAGE> {
3004
+ constructor(options?: HttpChatTransportInitOptions<UI_MESSAGE>);
3005
+ protected processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
2971
3006
  }
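`DefaultChatTransport` is the SSE implementation of `HttpChatTransport`. A configuration sketch based on the `HttpChatTransportInitOptions` fields declared above (the auth header and the last-message-only request body are illustrative choices):

```ts
import { DefaultChatTransport, type UIMessage } from 'ai';

// Passed as the `transport` option of a chat instance (see ChatInit.transport).
const transport = new DefaultChatTransport<UIMessage>({
  api: '/api/chat',
  credentials: 'include',
  headers: { Authorization: 'Bearer <token>' }, // illustrative auth header
  // Send only the last message plus the chat id; the server is assumed to
  // reconstruct the rest of the history.
  prepareSendMessagesRequest: ({ id, messages, body }) => ({
    body: { id, message: messages[messages.length - 1], ...body },
  }),
});
```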
2972
3007
 
2973
3008
  /**
2974
- Error that is thrown when a model with an unsupported version is used.
3009
+ Check if the last message is an assistant message with completed tool call approvals.
3010
+ The last step of the message must have at least one tool approval response and
3011
+ all tool approvals must have a response.
2975
3012
  */
2976
- declare class UnsupportedModelVersionError extends AISDKError {
2977
- readonly version: string;
2978
- readonly provider: string;
2979
- readonly modelId: string;
2980
- constructor(options: {
2981
- version: string;
2982
- provider: string;
2983
- modelId: string;
2984
- });
2985
- }
2986
-
2987
- declare const symbol$5: unique symbol;
2988
- declare class InvalidDataContentError extends AISDKError {
2989
- private readonly [symbol$5];
2990
- readonly content: unknown;
2991
- constructor({ content, cause, message, }: {
2992
- content: unknown;
2993
- cause?: unknown;
2994
- message?: string;
2995
- });
2996
- static isInstance(error: unknown): error is InvalidDataContentError;
2997
- }
2998
-
2999
- declare const symbol$4: unique symbol;
3000
- declare class InvalidMessageRoleError extends AISDKError {
3001
- private readonly [symbol$4];
3002
- readonly role: string;
3003
- constructor({ role, message, }: {
3004
- role: string;
3005
- message?: string;
3006
- });
3007
- static isInstance(error: unknown): error is InvalidMessageRoleError;
3008
- }
3009
-
3010
- declare const symbol$3: unique symbol;
3011
- declare class MessageConversionError extends AISDKError {
3012
- private readonly [symbol$3];
3013
- readonly originalMessage: Omit<UIMessage, 'id'>;
3014
- constructor({ originalMessage, message, }: {
3015
- originalMessage: Omit<UIMessage, 'id'>;
3016
- message: string;
3017
- });
3018
- static isInstance(error: unknown): error is MessageConversionError;
3019
- }
3013
+ declare function lastAssistantMessageIsCompleteWithApprovalResponses({ messages, }: {
3014
+ messages: UIMessage[];
3015
+ }): boolean;
3020
3016
 
3021
- declare const symbol$2: unique symbol;
3022
- declare class DownloadError extends AISDKError {
3023
- private readonly [symbol$2];
3024
- readonly url: string;
3025
- readonly statusCode?: number;
3026
- readonly statusText?: string;
3027
- constructor({ url, statusCode, statusText, cause, message, }: {
3028
- url: string;
3029
- statusCode?: number;
3030
- statusText?: string;
3031
- message?: string;
3032
- cause?: unknown;
3033
- });
3034
- static isInstance(error: unknown): error is DownloadError;
3035
- }
3017
+ /**
3018
+ Check if the message is an assistant message with completed tool calls.
3019
+ The last step of the message must have at least one tool invocation and
3020
+ all tool invocations must have a result.
3021
+ */
3022
+ declare function lastAssistantMessageIsCompleteWithToolCalls({ messages, }: {
3023
+ messages: UIMessage[];
3024
+ }): boolean;
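Both `lastAssistantMessageIsComplete…` helpers match the `({ messages }) => boolean | PromiseLike<boolean>` shape of `ChatInit.sendAutomaticallyWhen`, so either can drive automatic resubmission. A sketch combining the two, assuming both helpers are exported as declared:

```ts
import {
  lastAssistantMessageIsCompleteWithApprovalResponses,
  lastAssistantMessageIsCompleteWithToolCalls,
  type UIMessage,
} from 'ai';

// Resubmit once the last assistant message has results for all tool calls,
// or responses for all tool approvals.
const sendAutomaticallyWhen = (options: { messages: UIMessage[] }) =>
  lastAssistantMessageIsCompleteWithToolCalls(options) ||
  lastAssistantMessageIsCompleteWithApprovalResponses(options);
```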
3036
3025
 
3037
- declare const symbol$1: unique symbol;
3038
- type RetryErrorReason = 'maxRetriesExceeded' | 'errorNotRetryable' | 'abort';
3039
- declare class RetryError extends AISDKError {
3040
- private readonly [symbol$1];
3041
- readonly reason: RetryErrorReason;
3042
- readonly lastError: unknown;
3043
- readonly errors: Array<unknown>;
3044
- constructor({ message, reason, errors, }: {
3045
- message: string;
3046
- reason: RetryErrorReason;
3047
- errors: Array<unknown>;
3048
- });
3049
- static isInstance(error: unknown): error is RetryError;
3026
+ declare class TextStreamChatTransport<UI_MESSAGE extends UIMessage> extends HttpChatTransport<UI_MESSAGE> {
3027
+ constructor(options?: HttpChatTransportInitOptions<UI_MESSAGE>);
3028
+ protected processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
3050
3029
  }
3051
3030
 
3052
- /**
3053
- The result of a `generateImage` call.
3054
- It contains the images and additional information.
3055
- */
3056
- interface GenerateImageResult {
3031
+ type CompletionRequestOptions = {
3057
3032
  /**
3058
- The first image that was generated.
3033
+ An optional object of headers to be passed to the API endpoint.
3059
3034
  */
3060
- readonly image: GeneratedFile;
3061
- /**
3062
- The images that were generated.
3063
- */
3064
- readonly images: Array<GeneratedFile>;
3035
+ headers?: Record<string, string> | Headers;
3065
3036
  /**
3066
- Warnings for the call, e.g. unsupported settings.
3037
+ An optional object to be passed to the API endpoint.
3067
3038
  */
3068
- readonly warnings: Array<ImageGenerationWarning>;
3039
+ body?: object;
3040
+ };
3041
+ type UseCompletionOptions = {
3069
3042
  /**
3070
- Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
3043
+ * The API endpoint that accepts a `{ prompt: string }` object and returns
3044
+ * a stream of tokens of the AI completion response. Defaults to `/api/completion`.
3071
3045
  */
3072
- readonly responses: Array<ImageModelResponseMetadata>;
3046
+ api?: string;
3073
3047
  /**
3074
- * Provider-specific metadata. They are passed through from the provider to the AI SDK and enable provider-specific
3075
- * results that can be fully encapsulated in the provider.
3048
+ * An unique identifier for the chat. If not provided, a random one will be
3049
+ * generated. When provided, the `useChat` hook with the same `id` will
3050
+ * have shared states across components.
3076
3051
  */
3077
- readonly providerMetadata: ImageModelProviderMetadata;
3078
- }
3079
-
3080
- /**
3081
- Generates images using an image model.
3082
-
3083
- @param model - The image model to use.
3084
- @param prompt - The prompt that should be used to generate the image.
3085
- @param n - Number of images to generate. Default: 1.
3086
- @param size - Size of the images to generate. Must have the format `{width}x{height}`.
3087
- @param aspectRatio - Aspect ratio of the images to generate. Must have the format `{width}:{height}`.
3088
- @param seed - Seed for the image generation.
3089
- @param providerOptions - Additional provider-specific options that are passed through to the provider
3090
- as body parameters.
3091
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
3092
- @param abortSignal - An optional abort signal that can be used to cancel the call.
3093
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
3094
-
3095
- @returns A result object that contains the generated images.
3096
- */
3097
- declare function generateImage({ model, prompt, n, maxImagesPerCall, size, aspectRatio, seed, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
3052
+ id?: string;
3098
3053
  /**
3099
- The image model to use.
3100
- */
3101
- model: ImageModelV3;
3054
+ * Initial prompt input of the completion.
3055
+ */
3056
+ initialInput?: string;
3102
3057
  /**
3103
- The prompt that should be used to generate the image.
3058
+ * Initial completion result. Useful to load an existing history.
3104
3059
  */
3105
- prompt: string;
3060
+ initialCompletion?: string;
3106
3061
  /**
3107
- Number of images to generate.
3062
+ * Callback function to be called when the completion is finished streaming.
3108
3063
  */
3109
- n?: number;
3064
+ onFinish?: (prompt: string, completion: string) => void;
3110
3065
  /**
3111
- Number of images to generate.
3066
+ * Callback function to be called when an error is encountered.
3112
3067
  */
3113
- maxImagesPerCall?: number;
3068
+ onError?: (error: Error) => void;
3114
3069
  /**
3115
- Size of the images to generate. Must have the format `{width}x{height}`. If not provided, the default size will be used.
3070
+ * The credentials mode to be used for the fetch request.
3071
+ * Possible values are: 'omit', 'same-origin', 'include'.
3072
+ * Defaults to 'same-origin'.
3116
3073
  */
3117
- size?: `${number}x${number}`;
3074
+ credentials?: RequestCredentials;
3118
3075
  /**
3119
- Aspect ratio of the images to generate. Must have the format `{width}:{height}`. If not provided, the default aspect ratio will be used.
3076
+ * HTTP headers to be sent with the API request.
3120
3077
  */
3121
- aspectRatio?: `${number}:${number}`;
3078
+ headers?: Record<string, string> | Headers;
3122
3079
  /**
3123
- Seed for the image generation. If not provided, the default seed will be used.
3080
+ * Extra body object to be sent with the API request.
3081
+ * @example
3082
+ * Send a `sessionId` to the API along with the prompt.
3083
+ * ```js
3084
+ * useChat({
3085
+ * body: {
3086
+ * sessionId: '123',
3087
+ * }
3088
+ * })
3089
+ * ```
3124
3090
  */
3125
- seed?: number;
3091
+ body?: object;
3126
3092
  /**
3127
- Additional provider-specific options that are passed through to the provider
3128
- as body parameters.
3129
-
3130
- The outer record is keyed by the provider name, and the inner
3131
- record is keyed by the provider-specific metadata key.
3132
- ```ts
3133
- {
3134
- "openai": {
3135
- "style": "vivid"
3136
- }
3137
- }
3138
- ```
3093
+ Streaming protocol that is used. Defaults to `data`.
3139
3094
  */
3140
- providerOptions?: ProviderOptions;
3095
+ streamProtocol?: 'data' | 'text';
3141
3096
  /**
3142
- Maximum number of retries per embedding model call. Set to 0 to disable retries.
3143
-
3144
- @default 2
3097
+ Custom fetch implementation. You can use it as a middleware to intercept requests,
3098
+ or to provide a custom fetch implementation for e.g. testing.
3099
+ */
3100
+ fetch?: FetchFunction;
3101
+ };
3102
+
3103
+ type SafeValidateUIMessagesResult<UI_MESSAGE extends UIMessage> = {
3104
+ success: true;
3105
+ data: Array<UI_MESSAGE>;
3106
+ } | {
3107
+ success: false;
3108
+ error: Error;
3109
+ };
3110
+ /**
3111
+ * Validates a list of UI messages like `validateUIMessages`,
3112
+ * but instead of throwing it returns `{ success: true, data }`
3113
+ * or `{ success: false, error }`.
3114
+ */
3115
+ declare function safeValidateUIMessages<UI_MESSAGE extends UIMessage>({ messages, metadataSchema, dataSchemas, tools, }: {
3116
+ messages: unknown;
3117
+ metadataSchema?: FlexibleSchema<UIMessage['metadata']>;
3118
+ dataSchemas?: {
3119
+ [NAME in keyof InferUIMessageData<UI_MESSAGE> & string]?: FlexibleSchema<InferUIMessageData<UI_MESSAGE>[NAME]>;
3120
+ };
3121
+ tools?: {
3122
+ [NAME in keyof InferUIMessageTools<UI_MESSAGE> & string]?: Tool<InferUIMessageTools<UI_MESSAGE>[NAME]['input'], InferUIMessageTools<UI_MESSAGE>[NAME]['output']>;
3123
+ };
3124
+ }): Promise<SafeValidateUIMessagesResult<UI_MESSAGE>>;
3125
+ /**
3126
+ * Validates a list of UI messages.
3127
+ *
3128
+ * Metadata, data parts, and generic tool call structures are only validated if
3129
+ * the corresponding schemas are provided. Otherwise, they are assumed to be
3130
+ * valid.
3131
+ */
3132
+ declare function validateUIMessages<UI_MESSAGE extends UIMessage>({ messages, metadataSchema, dataSchemas, tools, }: {
3133
+ messages: unknown;
3134
+ metadataSchema?: FlexibleSchema<UIMessage['metadata']>;
3135
+ dataSchemas?: {
3136
+ [NAME in keyof InferUIMessageData<UI_MESSAGE> & string]?: FlexibleSchema<InferUIMessageData<UI_MESSAGE>[NAME]>;
3137
+ };
3138
+ tools?: {
3139
+ [NAME in keyof InferUIMessageTools<UI_MESSAGE> & string]?: Tool<InferUIMessageTools<UI_MESSAGE>[NAME]['input'], InferUIMessageTools<UI_MESSAGE>[NAME]['output']>;
3140
+ };
3141
+ }): Promise<Array<UI_MESSAGE>>;
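The two validators above differ only in error handling: `validateUIMessages` rejects on malformed input, while `safeValidateUIMessages` returns a discriminated union. A minimal sketch of how a route might use them; the payload shape and helper names are illustrative assumptions, not part of this diff:

```ts
import { safeValidateUIMessages, validateUIMessages, type UIMessage } from 'ai';

// `body` stands in for an untrusted payload, e.g. a parsed request body (assumed shape).
export async function parseIncomingMessages(body: unknown): Promise<UIMessage[]> {
  // non-throwing variant: resolves to { success: true, data } or { success: false, error }
  const result = await safeValidateUIMessages<UIMessage>({ messages: body });
  if (!result.success) {
    console.error('invalid UI messages:', result.error.message);
    return [];
  }
  return result.data;
}

// throwing variant: rejects the promise if any message is malformed
export const parseOrThrow = (body: unknown) =>
  validateUIMessages<UIMessage>({ messages: body });
```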
3142
+
3143
+ interface UIMessageStreamWriter<UI_MESSAGE extends UIMessage = UIMessage> {
3144
+ /**
3145
+ * Appends a data stream part to the stream.
3145
3146
  */
3146
- maxRetries?: number;
3147
+ write(part: InferUIMessageChunk<UI_MESSAGE>): void;
3147
3148
  /**
3148
- Abort signal.
3149
- */
3150
- abortSignal?: AbortSignal;
3149
+ * Merges the contents of another stream to this stream.
3150
+ */
3151
+ merge(stream: ReadableStream<InferUIMessageChunk<UI_MESSAGE>>): void;
3151
3152
  /**
3152
- Additional headers to include in the request.
3153
- Only applicable for HTTP-based providers.
3154
- */
3155
- headers?: Record<string, string>;
3156
- }): Promise<GenerateImageResult>;
3153
+ * Error handler that is used by the data stream writer.
3154
+ * This is intended for forwarding when merging streams
3155
+ * to prevent duplicated error masking.
3156
+ */
3157
+ onError: ErrorHandler | undefined;
3158
+ }
3159
+
3160
+ declare function createUIMessageStream<UI_MESSAGE extends UIMessage>({ execute, onError, originalMessages, onFinish, generateId, }: {
3161
+ execute: (options: {
3162
+ writer: UIMessageStreamWriter<UI_MESSAGE>;
3163
+ }) => Promise<void> | void;
3164
+ onError?: (error: unknown) => string;
3165
+ /**
3166
+ * The original messages. If they are provided, persistence mode is assumed,
3167
+ * and a message ID is provided for the response message.
3168
+ */
3169
+ originalMessages?: UI_MESSAGE[];
3170
+ onFinish?: UIMessageStreamOnFinishCallback<UI_MESSAGE>;
3171
+ generateId?: IdGenerator;
3172
+ }): ReadableStream<InferUIMessageChunk<UI_MESSAGE>>;
3173
+
3174
+ declare function createUIMessageStreamResponse({ status, statusText, headers, stream, consumeSseStream, }: UIMessageStreamResponseInit & {
3175
+ stream: ReadableStream<UIMessageChunk>;
3176
+ }): Response;
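Taken together, `createUIMessageStream` and `createUIMessageStreamResponse` let a handler emit a hand-built UI message stream over HTTP. A minimal sketch; the chunk field names follow the text stream parts declared elsewhere in this file and should be treated as assumptions for this beta:

```ts
import {
  createUIMessageStream,
  createUIMessageStreamResponse,
  type UIMessage,
} from 'ai';

export function GET(): Response {
  const stream = createUIMessageStream<UIMessage>({
    execute: async ({ writer }) => {
      // write a single text part in three chunks: start, delta, end
      writer.write({ type: 'text-start', id: 'text-1' });
      writer.write({ type: 'text-delta', id: 'text-1', delta: 'Hello from a custom stream!' });
      writer.write({ type: 'text-end', id: 'text-1' });
    },
    onError: error => `Stream error: ${String(error)}`,
  });

  // wraps the chunk stream in an SSE Response with the UI message stream headers
  return createUIMessageStreamResponse({ stream });
}
```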
3177
+
3178
+ declare class JsonToSseTransformStream extends TransformStream<unknown, string> {
3179
+ constructor();
3180
+ }
3181
+
3182
+ declare function pipeUIMessageStreamToResponse({ response, status, statusText, headers, stream, consumeSseStream, }: {
3183
+ response: ServerResponse;
3184
+ stream: ReadableStream<UIMessageChunk>;
3185
+ } & UIMessageStreamResponseInit): void;
3157
3186
 
3158
3187
  /**
3159
- The result of a `generateObject` call.
3188
+ * Transforms a stream of `UIMessageChunk`s into an `AsyncIterableStream` of `UIMessage`s.
3189
+ *
3190
+ * @param options.message - The last assistant message to use as a starting point when the conversation is resumed. Otherwise undefined.
3191
+ * @param options.stream - The stream of `UIMessageChunk`s to read.
3192
+ * @param options.terminateOnError - Whether to terminate the stream if an error occurs.
3193
+ * @param options.onError - A function that is called when an error occurs.
3194
+ *
3195
+ * @returns An `AsyncIterableStream` of `UIMessage`s. Each stream part is a different state of the same message
3196
+ * as it is being completed.
3160
3197
  */
3161
- interface GenerateObjectResult<OBJECT> {
3198
+ declare function readUIMessageStream<UI_MESSAGE extends UIMessage>({ message, stream, onError, terminateOnError, }: {
3199
+ message?: UI_MESSAGE;
3200
+ stream: ReadableStream<UIMessageChunk>;
3201
+ onError?: (error: unknown) => void;
3202
+ terminateOnError?: boolean;
3203
+ }): AsyncIterableStream<UI_MESSAGE>;
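`readUIMessageStream` is the reading counterpart: it folds a chunk stream back into progressively more complete `UIMessage` snapshots. A small sketch, assuming the chunk stream comes from `createUIMessageStream` or a network response:

```ts
import { readUIMessageStream, type UIMessage, type UIMessageChunk } from 'ai';

export async function logMessageStates(chunks: ReadableStream<UIMessageChunk>) {
  for await (const snapshot of readUIMessageStream<UIMessage>({ stream: chunks })) {
    // each iteration yields the same message in a more complete state
    console.log(`message ${snapshot.id}: ${snapshot.parts.length} part(s) so far`);
  }
}
```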
3204
+
3205
+ declare const UI_MESSAGE_STREAM_HEADERS: {
3206
+ 'content-type': string;
3207
+ 'cache-control': string;
3208
+ connection: string;
3209
+ 'x-vercel-ai-ui-message-stream': string;
3210
+ 'x-accel-buffering': string;
3211
+ };
3212
+
3213
+ /**
3214
+ * Runs the agent and streams the output as a UI message stream.
3215
+ *
3216
+ * @param agent - The agent to run.
3217
+ * @param messages - The input UI messages.
3218
+ *
3219
+ * @returns The UI message stream.
3220
+ */
3221
+ declare function createAgentUIStream<TOOLS extends ToolSet = {}, OUTPUT extends Output = never>({ agent, messages, ...uiMessageStreamOptions }: {
3222
+ agent: Agent<TOOLS, OUTPUT>;
3223
+ messages: unknown[];
3224
+ } & UIMessageStreamOptions<UIMessage<never, never, InferUITools<TOOLS>>>): Promise<AsyncIterableStream<InferUIMessageChunk<UIMessage<never, never, InferUITools<TOOLS>>>>>;
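A sketch of consuming `createAgentUIStream` directly. Constructing the `Agent` is outside this diff, so the agent parameter is typed via the function's own signature rather than guessing at `Agent`'s generics; the chunk handling is illustrative only:

```ts
import { createAgentUIStream } from 'ai';

// reuse the function's own parameter type instead of restating Agent's generics
type AgentArg = Parameters<typeof createAgentUIStream>[0]['agent'];

export async function logAgentRun(agent: AgentArg, messages: unknown[]) {
  const stream = await createAgentUIStream({ agent, messages });
  for await (const chunk of stream) {
    console.log(chunk.type); // e.g. text deltas, tool calls, finish
  }
}
```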
3225
+
3226
+ /**
3227
+ * Pipes the agent UI message stream to a Node.js ServerResponse object.
3228
+ *
3229
+ * @param agent - The agent to run.
3230
+ * @param messages - The input UI messages.
3231
+ */
3232
+ declare function pipeAgentUIStreamToResponse<TOOLS extends ToolSet = {}, OUTPUT extends Output = never>({ response, headers, status, statusText, consumeSseStream, ...options }: {
3233
+ response: ServerResponse;
3234
+ agent: Agent<TOOLS, OUTPUT>;
3235
+ messages: unknown[];
3236
+ } & UIMessageStreamResponseInit & UIMessageStreamOptions<UIMessage<never, never, InferUITools<TOOLS>>>): Promise<void>;
3237
+
3238
+ /**
3239
+ The result of an `embed` call.
3240
+ It contains the embedding, the value, and additional information.
3241
+ */
3242
+ interface EmbedResult<VALUE> {
3162
3243
  /**
3163
- The generated object (typed according to the schema).
3244
+ The value that was embedded.
3164
3245
  */
3165
- readonly object: OBJECT;
3246
+ readonly value: VALUE;
3166
3247
  /**
3167
- * The reasoning that was used to generate the object.
3168
- * Concatenated from all reasoning parts.
3169
- */
3170
- readonly reasoning: string | undefined;
3248
+ The embedding of the value.
3249
+ */
3250
+ readonly embedding: Embedding;
3171
3251
  /**
3172
- The reason why the generation finished.
3173
- */
3174
- readonly finishReason: FinishReason;
3252
+ The embedding token usage.
3253
+ */
3254
+ readonly usage: EmbeddingModelUsage;
3175
3255
  /**
3176
- The token usage of the generated text.
3256
+ Optional provider-specific metadata.
3177
3257
  */
3178
- readonly usage: LanguageModelUsage;
3258
+ readonly providerMetadata?: ProviderMetadata;
3179
3259
  /**
3180
- Warnings from the model provider (e.g. unsupported settings).
3260
+ Optional response data.
3181
3261
  */
3182
- readonly warnings: CallWarning[] | undefined;
3183
- /**
3184
- Additional request information.
3185
- */
3186
- readonly request: LanguageModelRequestMetadata;
3187
- /**
3188
- Additional response information.
3189
- */
3190
- readonly response: LanguageModelResponseMetadata & {
3262
+ readonly response?: {
3191
3263
  /**
3192
- Response body (available only for providers that use HTTP requests).
3264
+ Response headers.
3265
+ */
3266
+ headers?: Record<string, string>;
3267
+ /**
3268
+ The response body.
3193
3269
  */
3194
3270
  body?: unknown;
3195
3271
  };
3196
- /**
3197
- Additional provider-specific metadata. They are passed through
3198
- from the provider to the AI SDK and enable provider-specific
3199
- results that can be fully encapsulated in the provider.
3200
- */
3201
- readonly providerMetadata: ProviderMetadata | undefined;
3202
- /**
3203
- Converts the object to a JSON response.
3204
- The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
3205
- */
3206
- toJsonResponse(init?: ResponseInit): Response;
3207
3272
  }
3208
3273
 
3209
3274
  /**
3210
- A function that attempts to repair the raw output of the model
3211
- to enable JSON parsing.
3212
-
3213
- Should return the repaired text or null if the text cannot be repaired.
3214
- */
3215
- type RepairTextFunction = (options: {
3216
- text: string;
3217
- error: JSONParseError | TypeValidationError;
3218
- }) => Promise<string | null>;
3219
-
3220
- /**
3221
- Generate a structured, typed object for a given prompt and schema using a language model.
3222
-
3223
- This function does not stream the output. If you want to stream the output, use `streamObject` instead.
3224
-
3225
- @param model - The language model to use.
3226
- @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
3227
-
3228
- @param system - A system message that will be part of the prompt.
3229
- @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
3230
- @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
3275
+ Embed a value using an embedding model. The type of the value is defined by the embedding model.
3231
3276
 
3232
- @param maxOutputTokens - Maximum number of tokens to generate.
3233
- @param temperature - Temperature setting.
3234
- The value is passed through to the provider. The range depends on the provider and model.
3235
- It is recommended to set either `temperature` or `topP`, but not both.
3236
- @param topP - Nucleus sampling.
3237
- The value is passed through to the provider. The range depends on the provider and model.
3238
- It is recommended to set either `temperature` or `topP`, but not both.
3239
- @param topK - Only sample from the top K options for each subsequent token.
3240
- Used to remove "long tail" low probability responses.
3241
- Recommended for advanced use cases only. You usually only need to use temperature.
3242
- @param presencePenalty - Presence penalty setting.
3243
- It affects the likelihood of the model to repeat information that is already in the prompt.
3244
- The value is passed through to the provider. The range depends on the provider and model.
3245
- @param frequencyPenalty - Frequency penalty setting.
3246
- It affects the likelihood of the model to repeatedly use the same words or phrases.
3247
- The value is passed through to the provider. The range depends on the provider and model.
3248
- @param stopSequences - Stop sequences.
3249
- If set, the model will stop generating text when one of the stop sequences is generated.
3250
- @param seed - The seed (integer) to use for random sampling.
3251
- If set and supported by the model, calls will generate deterministic results.
3277
+ @param model - The embedding model to use.
3278
+ @param value - The value that should be embedded.
3252
3279
 
3253
3280
  @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
3254
3281
  @param abortSignal - An optional abort signal that can be used to cancel the call.
3255
3282
  @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
3256
3283
 
3257
- @param schema - The schema of the object that the model should generate.
3258
- @param schemaName - Optional name of the output that should be generated.
3259
- Used by some providers for additional LLM guidance, e.g.
3260
- via tool or schema name.
3261
- @param schemaDescription - Optional description of the output that should be generated.
3262
- Used by some providers for additional LLM guidance, e.g.
3263
- via tool or schema description.
3264
-
3265
- @param output - The type of the output.
3266
-
3267
- - 'object': The output is an object.
3268
- - 'array': The output is an array.
3269
- - 'enum': The output is an enum.
3270
- - 'no-schema': The output is not a schema.
3271
-
3272
- @param experimental_repairText - A function that attempts to repair the raw output of the model
3273
- to enable JSON parsing.
3274
-
3275
- @param experimental_telemetry - Optional telemetry configuration (experimental).
3276
-
3277
- @param providerOptions - Additional provider-specific options. They are passed through
3278
- to the provider from the AI SDK and enable provider-specific
3279
- functionality that can be fully encapsulated in the provider.
3280
-
3281
- @returns
3282
- A result object that contains the generated object, the finish reason, the token usage, and additional information.
3284
+ @returns A result object that contains the embedding, the value, and additional information.
3283
3285
  */
3284
- declare function generateObject<SCHEMA extends FlexibleSchema<unknown> = FlexibleSchema<JSONValue$1>, OUTPUT extends 'object' | 'array' | 'enum' | 'no-schema' = InferSchema<SCHEMA> extends string ? 'enum' : 'object', RESULT = OUTPUT extends 'array' ? Array<InferSchema<SCHEMA>> : InferSchema<SCHEMA>>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (OUTPUT extends 'enum' ? {
3285
- /**
3286
- The enum values that the model should use.
3287
- */
3288
- enum: Array<RESULT>;
3289
- mode?: 'json';
3290
- output: 'enum';
3291
- } : OUTPUT extends 'no-schema' ? {} : {
3292
- /**
3293
- The schema of the object that the model should generate.
3294
- */
3295
- schema: SCHEMA;
3296
- /**
3297
- Optional name of the output that should be generated.
3298
- Used by some providers for additional LLM guidance, e.g.
3299
- via tool or schema name.
3300
- */
3301
- schemaName?: string;
3302
- /**
3303
- Optional description of the output that should be generated.
3304
- Used by some providers for additional LLM guidance, e.g.
3305
- via tool or schema description.
3306
- */
3307
- schemaDescription?: string;
3286
+ declare function embed<VALUE = string>({ model: modelArg, value, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
3308
3287
  /**
3309
- The mode to use for object generation.
3310
-
3311
- The schema is converted into a JSON schema and used in one of the following ways
3312
-
3313
- - 'auto': The provider will choose the best mode for the model.
3314
- - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
3315
- - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
3316
-
3317
- Please note that most providers do not support all modes.
3318
-
3319
- Default and recommended: 'auto' (best mode for the model).
3320
- */
3321
- mode?: 'auto' | 'json' | 'tool';
3322
- }) & {
3323
- output?: OUTPUT;
3288
+ The embedding model to use.
3289
+ */
3290
+ model: EmbeddingModel<VALUE>;
3324
3291
  /**
3325
- The language model to use.
3292
+ The value that should be embedded.
3326
3293
  */
3327
- model: LanguageModel;
3294
+ value: VALUE;
3328
3295
  /**
3329
- A function that attempts to repair the raw output of the model
3330
- to enable JSON parsing.
3296
+ Maximum number of retries per embedding model call. Set to 0 to disable retries.
3297
+
3298
+ @default 2
3331
3299
  */
3332
- experimental_repairText?: RepairTextFunction;
3300
+ maxRetries?: number;
3333
3301
  /**
3334
- Optional telemetry configuration (experimental).
3335
- */
3336
- experimental_telemetry?: TelemetrySettings;
3302
+ Abort signal.
3303
+ */
3304
+ abortSignal?: AbortSignal;
3337
3305
  /**
3338
- Custom download function to use for URLs.
3339
-
3340
- By default, files are downloaded if the model does not support the URL for the given media type.
3341
- */
3342
- experimental_download?: DownloadFunction | undefined;
3306
+ Additional headers to include in the request.
3307
+ Only applicable for HTTP-based providers.
3308
+ */
3309
+ headers?: Record<string, string>;
3343
3310
  /**
3344
- Additional provider-specific options. They are passed through
3345
- to the provider from the AI SDK and enable provider-specific
3346
- functionality that can be fully encapsulated in the provider.
3347
- */
3311
+ Additional provider-specific options. They are passed through
3312
+ to the provider from the AI SDK and enable provider-specific
3313
+ functionality that can be fully encapsulated in the provider.
3314
+ */
3348
3315
  providerOptions?: ProviderOptions;
3349
3316
  /**
3350
- * Internal. For test use only. May change without notice.
3317
+ * Optional telemetry configuration (experimental).
3351
3318
  */
3352
- _internal?: {
3353
- generateId?: () => string;
3354
- currentDate?: () => Date;
3355
- };
3356
- }): Promise<GenerateObjectResult<RESULT>>;
3357
-
3358
- /**
3359
- * Consumes a ReadableStream until it's fully read.
3360
- *
3361
- * This function reads the stream chunk by chunk until the stream is exhausted.
3362
- * It doesn't process or return the data from the stream; it simply ensures
3363
- * that the entire stream is read.
3364
- *
3365
- * @param {ReadableStream} stream - The ReadableStream to be consumed.
3366
- * @returns {Promise<void>} A promise that resolves when the stream is fully consumed.
3367
- */
3368
- declare function consumeStream({ stream, onError, }: {
3369
- stream: ReadableStream;
3370
- onError?: (error: unknown) => void;
3371
- }): Promise<void>;
3372
-
3373
- /**
3374
- * Calculates the cosine similarity between two vectors. This is a useful metric for
3375
- * comparing the similarity of two vectors such as embeddings.
3376
- *
3377
- * @param vector1 - The first vector.
3378
- * @param vector2 - The second vector.
3379
- *
3380
- * @returns The cosine similarity between vector1 and vector2.
3381
- * @returns 0 if either vector is the zero vector.
3382
- *
3383
- * @throws {InvalidArgumentError} If the vectors do not have the same length.
3384
- */
3385
- declare function cosineSimilarity(vector1: number[], vector2: number[]): number;
3319
+ experimental_telemetry?: TelemetrySettings;
3320
+ }): Promise<EmbedResult<VALUE>>;
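A minimal `embed` sketch. The provider and model id are assumptions (any `EmbeddingModel` works); `textEmbeddingModel` is the standard provider method for resolving an embedding model, and `cosineSimilarity` is exported from this package as well:

```ts
import { embed, cosineSimilarity } from 'ai';
import { openai } from '@ai-sdk/openai';

export async function compare(a: string, b: string): Promise<number> {
  const model = openai.textEmbeddingModel('text-embedding-3-small'); // assumed model id
  const [first, second] = await Promise.all([
    embed({ model, value: a }),
    embed({ model, value: b }),
  ]);
  // embeddings are plain number arrays, so they can be compared directly
  return cosineSimilarity(first.embedding, second.embedding);
}
```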
3386
3321
 
3387
3322
  /**
3388
- * Converts a data URL of type text/* to a text string.
3323
+ The result of a `embedMany` call.
3324
+ It contains the embeddings, the values, and additional information.
3389
3325
  */
3390
- declare function getTextFromDataUrl(dataUrl: string): string;
3326
+ interface EmbedManyResult<VALUE> {
3327
+ /**
3328
+ The values that were embedded.
3329
+ */
3330
+ readonly values: Array<VALUE>;
3331
+ /**
3332
+ The embeddings. They are in the same order as the values.
3333
+ */
3334
+ readonly embeddings: Array<Embedding>;
3335
+ /**
3336
+ The embedding token usage.
3337
+ */
3338
+ readonly usage: EmbeddingModelUsage;
3339
+ /**
3340
+ Optional provider-specific metadata.
3341
+ */
3342
+ readonly providerMetadata?: ProviderMetadata;
3343
+ /**
3344
+ Optional raw response data.
3345
+ */
3346
+ readonly responses?: Array<{
3347
+ /**
3348
+ Response headers.
3349
+ */
3350
+ headers?: Record<string, string>;
3351
+ /**
3352
+ The response body.
3353
+ */
3354
+ body?: unknown;
3355
+ } | undefined>;
3356
+ }
3391
3357
 
3392
3358
  /**
3393
- * Performs a deep-equal comparison of two parsed JSON objects.
3394
- *
3395
- * @param {any} obj1 - The first object to compare.
3396
- * @param {any} obj2 - The second object to compare.
3397
- * @returns {boolean} - Returns true if the two objects are deeply equal, false otherwise.
3398
- */
3399
- declare function isDeepEqualData(obj1: any, obj2: any): boolean;
3400
-
3401
- declare function parsePartialJson(jsonText: string | undefined): Promise<{
3402
- value: JSONValue$1 | undefined;
3403
- state: 'undefined-input' | 'successful-parse' | 'repaired-parse' | 'failed-parse';
3404
- }>;
3359
+ Embed several values using an embedding model. The type of the value is defined
3360
+ by the embedding model.
3405
3361
 
3406
- type Job = () => Promise<void>;
3362
+ `embedMany` automatically splits large requests into smaller chunks if the model
3363
+ has a limit on how many embeddings can be generated in a single call.
3407
3364
 
3408
- declare class SerialJobExecutor {
3409
- private queue;
3410
- private isProcessing;
3411
- private processQueue;
3412
- run(job: Job): Promise<void>;
3413
- }
3365
+ @param model - The embedding model to use.
3366
+ @param values - The values that should be embedded.
3414
3367
 
3415
- /**
3416
- * Creates a ReadableStream that emits the provided values with an optional delay between each value.
3417
- *
3418
- * @param options - The configuration options
3419
- * @param options.chunks - Array of values to be emitted by the stream
3420
- * @param options.initialDelayInMs - Optional initial delay in milliseconds before emitting the first value (default: 0). Can be set to `null` to skip the initial delay. The difference between `initialDelayInMs: null` and `initialDelayInMs: 0` is that `initialDelayInMs: null` will emit the values without any delay, while `initialDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
3421
- * @param options.chunkDelayInMs - Optional delay in milliseconds between emitting each value (default: 0). Can be set to `null` to skip the delay. The difference between `chunkDelayInMs: null` and `chunkDelayInMs: 0` is that `chunkDelayInMs: null` will emit the values without any delay, while `chunkDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
3422
- * @returns A ReadableStream that emits the provided values
3423
- */
3424
- declare function simulateReadableStream<T>({ chunks, initialDelayInMs, chunkDelayInMs, _internal, }: {
3425
- chunks: T[];
3426
- initialDelayInMs?: number | null;
3427
- chunkDelayInMs?: number | null;
3428
- _internal?: {
3429
- delay?: (ms: number | null) => Promise<void>;
3430
- };
3431
- }): ReadableStream<T>;
3368
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
3369
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
3370
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
3432
3371
 
3433
- /**
3434
- The result of a `streamObject` call that contains the partial object stream and additional information.
3372
+ @returns A result object that contains the embeddings, the values, and additional information.
3435
3373
  */
3436
- interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
3374
+ declare function embedMany<VALUE = string>({ model: modelArg, values, maxParallelCalls, maxRetries: maxRetriesArg, abortSignal, headers, providerOptions, experimental_telemetry: telemetry, }: {
3437
3375
  /**
3438
- Warnings from the model provider (e.g. unsupported settings)
3376
+ The embedding model to use.
3439
3377
  */
3440
- readonly warnings: Promise<CallWarning[] | undefined>;
3378
+ model: EmbeddingModel<VALUE>;
3441
3379
  /**
3442
- The token usage of the generated response. Resolved when the response is finished.
3443
- */
3444
- readonly usage: Promise<LanguageModelUsage>;
3380
+ The values that should be embedded.
3381
+ */
3382
+ values: Array<VALUE>;
3445
3383
  /**
3446
- Additional provider-specific metadata. They are passed through
3447
- from the provider to the AI SDK and enable provider-specific
3448
- results that can be fully encapsulated in the provider.
3384
+ Maximum number of retries per embedding model call. Set to 0 to disable retries.
3385
+
3386
+ @default 2
3449
3387
  */
3450
- readonly providerMetadata: Promise<ProviderMetadata | undefined>;
3388
+ maxRetries?: number;
3451
3389
  /**
3452
- Additional request information from the last step.
3390
+ Abort signal.
3453
3391
  */
3454
- readonly request: Promise<LanguageModelRequestMetadata>;
3392
+ abortSignal?: AbortSignal;
3455
3393
  /**
3456
- Additional response information.
3394
+ Additional headers to include in the request.
3395
+ Only applicable for HTTP-based providers.
3457
3396
  */
3458
- readonly response: Promise<LanguageModelResponseMetadata>;
3397
+ headers?: Record<string, string>;
3459
3398
  /**
3460
- The reason why the generation finished. Taken from the last step.
3461
-
3462
- Resolved when the response is finished.
3463
- */
3464
- readonly finishReason: Promise<FinishReason>;
3399
+ * Optional telemetry configuration (experimental).
3400
+ */
3401
+ experimental_telemetry?: TelemetrySettings;
3465
3402
  /**
3466
- The generated object (typed according to the schema). Resolved when the response is finished.
3467
- */
3468
- readonly object: Promise<RESULT>;
3403
+ Additional provider-specific options. They are passed through
3404
+ to the provider from the AI SDK and enable provider-specific
3405
+ functionality that can be fully encapsulated in the provider.
3406
+ */
3407
+ providerOptions?: ProviderOptions;
3469
3408
  /**
3470
- Stream of partial objects. It gets more complete as the stream progresses.
3471
-
3472
- Note that the partial object is not validated.
3473
- If you want to be certain that the actual content matches your schema, you need to implement your own validation for partial results.
3474
- */
3475
- readonly partialObjectStream: AsyncIterableStream<PARTIAL>;
3476
- /**
3477
- * Stream over complete array elements. Only available if the output strategy is set to `array`.
3409
+ * Maximum number of concurrent requests.
3410
+ *
3411
+ * @default Infinity
3478
3412
  */
3479
- readonly elementStream: ELEMENT_STREAM;
3480
- /**
3481
- Text stream of the JSON representation of the generated object. It contains text chunks.
3482
- When the stream is finished, the object is valid JSON that can be parsed.
3483
- */
3484
- readonly textStream: AsyncIterableStream<string>;
3485
- /**
3486
- Stream of different types of events, including partial objects, errors, and finish events.
3487
- Only errors that stop the stream, such as network errors, are thrown.
3488
- */
3489
- readonly fullStream: AsyncIterableStream<ObjectStreamPart<PARTIAL>>;
3490
- /**
3491
- Writes text delta output to a Node.js response-like object.
3492
- It sets a `Content-Type` header to `text/plain; charset=utf-8` and
3493
- writes each text delta as a separate chunk.
3494
-
3495
- @param response A Node.js response-like object (ServerResponse).
3496
- @param init Optional headers, status code, and status text.
3497
- */
3498
- pipeTextStreamToResponse(response: ServerResponse$1, init?: ResponseInit): void;
3499
- /**
3500
- Creates a simple text stream response.
3501
- The response has a `Content-Type` header set to `text/plain; charset=utf-8`.
3502
- Each text delta is encoded as UTF-8 and sent as a separate chunk.
3503
- Non-text-delta events are ignored.
3504
-
3505
- @param init Optional headers, status code, and status text.
3506
- */
3507
- toTextStreamResponse(init?: ResponseInit): Response;
3413
+ maxParallelCalls?: number;
3414
+ }): Promise<EmbedManyResult<VALUE>>;
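`embedMany` batches the values and, per the comment above, splits oversized requests automatically. A small sketch, with the model id as an assumption:

```ts
import { embedMany } from 'ai';
import { openai } from '@ai-sdk/openai';

export async function indexDocuments(texts: string[]) {
  const { values, embeddings, usage } = await embedMany({
    model: openai.textEmbeddingModel('text-embedding-3-small'), // assumed model id
    values: texts,
    maxParallelCalls: 2, // cap concurrency when the request is split into chunks
  });
  console.log('embedding token usage:', usage);
  // embeddings[i] corresponds to values[i]
  return values.map((value, i) => ({ value, embedding: embeddings[i] }));
}
```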
3415
+
3416
+ declare const symbol$d: unique symbol;
3417
+ declare class InvalidArgumentError extends AISDKError {
3418
+ private readonly [symbol$d];
3419
+ readonly parameter: string;
3420
+ readonly value: unknown;
3421
+ constructor({ parameter, value, message, }: {
3422
+ parameter: string;
3423
+ value: unknown;
3424
+ message: string;
3425
+ });
3426
+ static isInstance(error: unknown): error is InvalidArgumentError;
3508
3427
  }
3509
- type ObjectStreamPart<PARTIAL> = {
3510
- type: 'object';
3511
- object: PARTIAL;
3428
+
3429
+ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
3430
+ type: 'text-start';
3431
+ providerMetadata?: ProviderMetadata;
3432
+ id: string;
3512
3433
  } | {
3513
3434
  type: 'text-delta';
3514
- textDelta: string;
3435
+ id: string;
3436
+ providerMetadata?: ProviderMetadata;
3437
+ delta: string;
3515
3438
  } | {
3516
- type: 'error';
3517
- error: unknown;
3439
+ type: 'text-end';
3440
+ providerMetadata?: ProviderMetadata;
3441
+ id: string;
3442
+ } | {
3443
+ type: 'reasoning-start';
3444
+ providerMetadata?: ProviderMetadata;
3445
+ id: string;
3446
+ } | {
3447
+ type: 'reasoning-delta';
3448
+ id: string;
3449
+ providerMetadata?: ProviderMetadata;
3450
+ delta: string;
3451
+ } | {
3452
+ type: 'reasoning-end';
3453
+ id: string;
3454
+ providerMetadata?: ProviderMetadata;
3455
+ } | {
3456
+ type: 'tool-input-start';
3457
+ id: string;
3458
+ toolName: string;
3459
+ providerMetadata?: ProviderMetadata;
3460
+ dynamic?: boolean;
3461
+ } | {
3462
+ type: 'tool-input-delta';
3463
+ id: string;
3464
+ delta: string;
3465
+ providerMetadata?: ProviderMetadata;
3466
+ } | {
3467
+ type: 'tool-input-end';
3468
+ id: string;
3469
+ providerMetadata?: ProviderMetadata;
3470
+ } | ToolApprovalRequestOutput<TOOLS> | ({
3471
+ type: 'source';
3472
+ } & Source) | {
3473
+ type: 'file';
3474
+ file: GeneratedFile;
3475
+ } | ({
3476
+ type: 'tool-call';
3477
+ } & TypedToolCall<TOOLS>) | ({
3478
+ type: 'tool-result';
3479
+ } & TypedToolResult<TOOLS>) | ({
3480
+ type: 'tool-error';
3481
+ } & TypedToolError<TOOLS>) | {
3482
+ type: 'file';
3483
+ file: GeneratedFile;
3484
+ } | {
3485
+ type: 'stream-start';
3486
+ warnings: LanguageModelV3CallWarning[];
3487
+ } | {
3488
+ type: 'response-metadata';
3489
+ id?: string;
3490
+ timestamp?: Date;
3491
+ modelId?: string;
3518
3492
  } | {
3519
3493
  type: 'finish';
3520
3494
  finishReason: FinishReason;
3521
3495
  usage: LanguageModelUsage;
3522
- response: LanguageModelResponseMetadata;
3523
3496
  providerMetadata?: ProviderMetadata;
3497
+ } | {
3498
+ type: 'error';
3499
+ error: unknown;
3500
+ } | {
3501
+ type: 'raw';
3502
+ rawValue: unknown;
3524
3503
  };
3525
3504
 
3526
- /**
3527
- Callback that is set using the `onError` option.
3505
+ declare const symbol$c: unique symbol;
3506
+ declare class InvalidStreamPartError extends AISDKError {
3507
+ private readonly [symbol$c];
3508
+ readonly chunk: SingleRequestTextStreamPart<any>;
3509
+ constructor({ chunk, message, }: {
3510
+ chunk: SingleRequestTextStreamPart<any>;
3511
+ message: string;
3512
+ });
3513
+ static isInstance(error: unknown): error is InvalidStreamPartError;
3514
+ }
3528
3515
 
3529
- @param event - The event that is passed to the callback.
3516
+ declare const symbol$b: unique symbol;
3517
+ /**
3518
+ * An error occurred with the MCP client.
3530
3519
  */
3531
- type StreamObjectOnErrorCallback = (event: {
3532
- error: unknown;
3533
- }) => Promise<void> | void;
3520
+ declare class MCPClientError extends AISDKError {
3521
+ private readonly [symbol$b];
3522
+ readonly data?: unknown;
3523
+ readonly code?: number;
3524
+ constructor({ name, message, cause, data, code, }: {
3525
+ name?: string;
3526
+ message: string;
3527
+ cause?: unknown;
3528
+ data?: unknown;
3529
+ code?: number;
3530
+ });
3531
+ static isInstance(error: unknown): error is MCPClientError;
3532
+ }
3533
+
3534
+ declare const symbol$a: unique symbol;
3534
3535
  /**
3535
- Callback that is set using the `onFinish` option.
3536
+ Thrown when no image could be generated. This can have multiple causes:
3536
3537
 
3537
- @param event - The event that is passed to the callback.
3538
+ - The model failed to generate a response.
3539
+ - The model generated a response that could not be parsed.
3538
3540
  */
3539
- type StreamObjectOnFinishCallback<RESULT> = (event: {
3540
- /**
3541
- The token usage of the generated response.
3542
- */
3543
- usage: LanguageModelUsage;
3541
+ declare class NoImageGeneratedError extends AISDKError {
3542
+ private readonly [symbol$a];
3544
3543
  /**
3545
- The generated object. Can be undefined if the final object does not match the schema.
3546
- */
3547
- object: RESULT | undefined;
3544
+ The response metadata for each call.
3545
+ */
3546
+ readonly responses: Array<ImageModelResponseMetadata> | undefined;
3547
+ constructor({ message, cause, responses, }: {
3548
+ message?: string;
3549
+ cause?: Error;
3550
+ responses?: Array<ImageModelResponseMetadata>;
3551
+ });
3552
+ static isInstance(error: unknown): error is NoImageGeneratedError;
3553
+ }
3554
+
3555
+ declare const symbol$9: unique symbol;
3556
+ /**
3557
+ Thrown when no object could be generated. This can have several causes:
3558
+
3559
+ - The model failed to generate a response.
3560
+ - The model generated a response that could not be parsed.
3561
+ - The model generated a response that could not be validated against the schema.
3562
+
3563
+ The error contains the following properties:
3564
+
3565
+ - `text`: The text that was generated by the model. This can be the raw text or the tool call text, depending on the model.
3566
+ */
3567
+ declare class NoObjectGeneratedError extends AISDKError {
3568
+ private readonly [symbol$9];
3548
3569
  /**
3549
- Optional error object. This is e.g. a TypeValidationError when the final object does not match the schema.
3550
- */
3551
- error: unknown | undefined;
3570
+ The text that was generated by the model. This can be the raw text or the tool call text, depending on the model.
3571
+ */
3572
+ readonly text: string | undefined;
3552
3573
  /**
3553
- Response metadata.
3554
- */
3555
- response: LanguageModelResponseMetadata;
3574
+ The response metadata.
3575
+ */
3576
+ readonly response: LanguageModelResponseMetadata | undefined;
3556
3577
  /**
3557
- Warnings from the model provider (e.g. unsupported settings).
3558
- */
3559
- warnings?: CallWarning[];
3578
+ The usage of the model.
3579
+ */
3580
+ readonly usage: LanguageModelUsage | undefined;
3560
3581
  /**
3561
- Additional provider-specific metadata. They are passed through
3562
- to the provider from the AI SDK and enable provider-specific
3563
- functionality that can be fully encapsulated in the provider.
3564
- */
3565
- providerMetadata: ProviderMetadata | undefined;
3566
- }) => Promise<void> | void;
3567
- /**
3568
- Generate a structured, typed object for a given prompt and schema using a language model.
3569
-
3570
- This function streams the output. If you do not want to stream the output, use `generateObject` instead.
3582
+ Reason why the model finished generating a response.
3583
+ */
3584
+ readonly finishReason: FinishReason | undefined;
3585
+ constructor({ message, cause, text, response, usage, finishReason, }: {
3586
+ message?: string;
3587
+ cause?: Error;
3588
+ text?: string;
3589
+ response: LanguageModelResponseMetadata;
3590
+ usage: LanguageModelUsage;
3591
+ finishReason: FinishReason;
3592
+ });
3593
+ static isInstance(error: unknown): error is NoObjectGeneratedError;
3594
+ }
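The diagnostic fields on `NoObjectGeneratedError` (raw `text`, `response`, `usage`, `finishReason`) are what you would typically log when structured output fails. A hedged sketch around a `generateObject` call; the model id and schema are assumptions:

```ts
import { generateObject, NoObjectGeneratedError } from 'ai';
import { z } from 'zod';

export async function extractSummary(prompt: string) {
  try {
    const { object } = await generateObject({
      model: 'openai/gpt-4o-mini', // assumed gateway model id
      schema: z.object({ summary: z.string(), tags: z.array(z.string()) }),
      prompt,
    });
    return object;
  } catch (error) {
    if (NoObjectGeneratedError.isInstance(error)) {
      // the raw model text, finish reason, and usage are preserved on the error
      console.error('no object generated:', error.text, error.finishReason, error.usage);
    }
    throw error;
  }
}
```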
3571
3595
 
3572
- @param model - The language model to use.
3573
- @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
3596
+ declare const symbol$8: unique symbol;
3597
+ /**
3598
+ Thrown when no LLM output was generated, e.g. because of errors.
3599
+ */
3600
+ declare class NoOutputGeneratedError extends AISDKError {
3601
+ private readonly [symbol$8];
3602
+ constructor({ message, cause, }?: {
3603
+ message?: string;
3604
+ cause?: Error;
3605
+ });
3606
+ static isInstance(error: unknown): error is NoOutputGeneratedError;
3607
+ }
3574
3608
 
3575
- @param system - A system message that will be part of the prompt.
3576
- @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
3577
- @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
3609
+ declare const symbol$7: unique symbol;
3610
+ /**
3611
+ Thrown when no output type is specified and output-related methods are called.
3612
+ */
3613
+ declare class NoOutputSpecifiedError extends AISDKError {
3614
+ private readonly [symbol$7];
3615
+ constructor({ message }?: {
3616
+ message?: string;
3617
+ });
3618
+ static isInstance(error: unknown): error is NoOutputSpecifiedError;
3619
+ }
3578
3620
 
3579
- @param maxOutputTokens - Maximum number of tokens to generate.
3580
- @param temperature - Temperature setting.
3581
- The value is passed through to the provider. The range depends on the provider and model.
3582
- It is recommended to set either `temperature` or `topP`, but not both.
3583
- @param topP - Nucleus sampling.
3584
- The value is passed through to the provider. The range depends on the provider and model.
3585
- It is recommended to set either `temperature` or `topP`, but not both.
3586
- @param topK - Only sample from the top K options for each subsequent token.
3587
- Used to remove "long tail" low probability responses.
3588
- Recommended for advanced use cases only. You usually only need to use temperature.
3589
- @param presencePenalty - Presence penalty setting.
3590
- It affects the likelihood of the model to repeat information that is already in the prompt.
3591
- The value is passed through to the provider. The range depends on the provider and model.
3592
- @param frequencyPenalty - Frequency penalty setting.
3593
- It affects the likelihood of the model to repeatedly use the same words or phrases.
3594
- The value is passed through to the provider. The range depends on the provider and model.
3595
- @param stopSequences - Stop sequences.
3596
- If set, the model will stop generating text when one of the stop sequences is generated.
3597
- @param seed - The seed (integer) to use for random sampling.
3598
- If set and supported by the model, calls will generate deterministic results.
3621
+ /**
3622
+ Error that is thrown when no speech audio was generated.
3623
+ */
3624
+ declare class NoSpeechGeneratedError extends AISDKError {
3625
+ readonly responses: Array<SpeechModelResponseMetadata>;
3626
+ constructor(options: {
3627
+ responses: Array<SpeechModelResponseMetadata>;
3628
+ });
3629
+ }
3599
3630
 
3600
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
3601
- @param abortSignal - An optional abort signal that can be used to cancel the call.
3602
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
3631
+ declare const symbol$6: unique symbol;
3632
+ declare class ToolCallRepairError extends AISDKError {
3633
+ private readonly [symbol$6];
3634
+ readonly originalError: NoSuchToolError | InvalidToolInputError;
3635
+ constructor({ cause, originalError, message, }: {
3636
+ message?: string;
3637
+ cause: unknown;
3638
+ originalError: NoSuchToolError | InvalidToolInputError;
3639
+ });
3640
+ static isInstance(error: unknown): error is ToolCallRepairError;
3641
+ }
3603
3642
 
3604
- @param schema - The schema of the object that the model should generate.
3605
- @param schemaName - Optional name of the output that should be generated.
3606
- Used by some providers for additional LLM guidance, e.g.
3607
- via tool or schema name.
3608
- @param schemaDescription - Optional description of the output that should be generated.
3609
- Used by some providers for additional LLM guidance, e.g.
3610
- via tool or schema description.
3643
+ /**
3644
+ Error that is thrown when a model with an unsupported version is used.
3645
+ */
3646
+ declare class UnsupportedModelVersionError extends AISDKError {
3647
+ readonly version: string;
3648
+ readonly provider: string;
3649
+ readonly modelId: string;
3650
+ constructor(options: {
3651
+ version: string;
3652
+ provider: string;
3653
+ modelId: string;
3654
+ });
3655
+ }
3611
3656
 
3612
- @param output - The type of the output.
3657
+ declare const symbol$5: unique symbol;
3658
+ declare class InvalidDataContentError extends AISDKError {
3659
+ private readonly [symbol$5];
3660
+ readonly content: unknown;
3661
+ constructor({ content, cause, message, }: {
3662
+ content: unknown;
3663
+ cause?: unknown;
3664
+ message?: string;
3665
+ });
3666
+ static isInstance(error: unknown): error is InvalidDataContentError;
3667
+ }
3613
3668
 
3614
- - 'object': The output is an object.
3615
- - 'array': The output is an array.
3616
- - 'enum': The output is an enum.
3617
- - 'no-schema': The output is not a schema.
3669
+ declare const symbol$4: unique symbol;
3670
+ declare class InvalidMessageRoleError extends AISDKError {
3671
+ private readonly [symbol$4];
3672
+ readonly role: string;
3673
+ constructor({ role, message, }: {
3674
+ role: string;
3675
+ message?: string;
3676
+ });
3677
+ static isInstance(error: unknown): error is InvalidMessageRoleError;
3678
+ }
3618
3679
 
3619
- @param experimental_telemetry - Optional telemetry configuration (experimental).
3680
+ declare const symbol$3: unique symbol;
3681
+ declare class MessageConversionError extends AISDKError {
3682
+ private readonly [symbol$3];
3683
+ readonly originalMessage: Omit<UIMessage, 'id'>;
3684
+ constructor({ originalMessage, message, }: {
3685
+ originalMessage: Omit<UIMessage, 'id'>;
3686
+ message: string;
3687
+ });
3688
+ static isInstance(error: unknown): error is MessageConversionError;
3689
+ }
3620
3690
 
3621
- @param providerOptions - Additional provider-specific options. They are passed through
3622
- to the provider from the AI SDK and enable provider-specific
3623
- functionality that can be fully encapsulated in the provider.
3691
+ declare const symbol$2: unique symbol;
3692
+ declare class DownloadError extends AISDKError {
3693
+ private readonly [symbol$2];
3694
+ readonly url: string;
3695
+ readonly statusCode?: number;
3696
+ readonly statusText?: string;
3697
+ constructor({ url, statusCode, statusText, cause, message, }: {
3698
+ url: string;
3699
+ statusCode?: number;
3700
+ statusText?: string;
3701
+ message?: string;
3702
+ cause?: unknown;
3703
+ });
3704
+ static isInstance(error: unknown): error is DownloadError;
3705
+ }
3624
3706
 
3625
- @returns
3626
- A result object for accessing the partial object stream and additional information.
3707
+ declare const symbol$1: unique symbol;
3708
+ type RetryErrorReason = 'maxRetriesExceeded' | 'errorNotRetryable' | 'abort';
3709
+ declare class RetryError extends AISDKError {
3710
+ private readonly [symbol$1];
3711
+ readonly reason: RetryErrorReason;
3712
+ readonly lastError: unknown;
3713
+ readonly errors: Array<unknown>;
3714
+ constructor({ message, reason, errors, }: {
3715
+ message: string;
3716
+ reason: RetryErrorReason;
3717
+ errors: Array<unknown>;
3718
+ });
3719
+ static isInstance(error: unknown): error is RetryError;
3720
+ }
3721
+
3722
+ /**
3723
+ The result of a `generateImage` call.
3724
+ It contains the images and additional information.
3627
3725
  */
3628
- declare function streamObject<SCHEMA extends FlexibleSchema<unknown> = FlexibleSchema<JSONValue$1>, OUTPUT extends 'object' | 'array' | 'enum' | 'no-schema' = InferSchema<SCHEMA> extends string ? 'enum' : 'object', RESULT = OUTPUT extends 'array' ? Array<InferSchema<SCHEMA>> : InferSchema<SCHEMA>>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (OUTPUT extends 'enum' ? {
3726
+ interface GenerateImageResult {
3629
3727
  /**
3630
- The enum values that the model should use.
3631
- */
3632
- enum: Array<RESULT>;
3633
- mode?: 'json';
3634
- output: 'enum';
3635
- } : OUTPUT extends 'no-schema' ? {} : {
3728
+ The first image that was generated.
3729
+ */
3730
+ readonly image: GeneratedFile;
3636
3731
  /**
3637
- The schema of the object that the model should generate.
3638
- */
3639
- schema: SCHEMA;
3732
+ The images that were generated.
3733
+ */
3734
+ readonly images: Array<GeneratedFile>;
3640
3735
  /**
3641
- Optional name of the output that should be generated.
3642
- Used by some providers for additional LLM guidance, e.g.
3643
- via tool or schema name.
3644
- */
3645
- schemaName?: string;
3736
+ Warnings for the call, e.g. unsupported settings.
3737
+ */
3738
+ readonly warnings: Array<ImageGenerationWarning>;
3646
3739
  /**
3647
- Optional description of the output that should be generated.
3648
- Used by some providers for additional LLM guidance, e.g.
3649
- via tool or schema description.
3650
- */
3651
- schemaDescription?: string;
3740
+ Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
3741
+ */
3742
+ readonly responses: Array<ImageModelResponseMetadata>;
3652
3743
  /**
3653
- The mode to use for object generation.
3654
-
3655
- The schema is converted into a JSON schema and used in one of the following ways
3744
+ * Provider-specific metadata. They are passed through from the provider to the AI SDK and enable provider-specific
3745
+ * results that can be fully encapsulated in the provider.
3746
+ */
3747
+ readonly providerMetadata: ImageModelProviderMetadata;
3748
+ }
3656
3749
 
3657
- - 'auto': The provider will choose the best mode for the model.
3658
- - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
3659
- - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
3750
+ /**
3751
+ Generates images using an image model.
3660
3752
 
3661
- Please note that most providers do not support all modes.
3753
+ @param model - The image model to use.
3754
+ @param prompt - The prompt that should be used to generate the image.
3755
+ @param n - Number of images to generate. Default: 1.
3756
+ @param size - Size of the images to generate. Must have the format `{width}x{height}`.
3757
+ @param aspectRatio - Aspect ratio of the images to generate. Must have the format `{width}:{height}`.
3758
+ @param seed - Seed for the image generation.
3759
+ @param providerOptions - Additional provider-specific options that are passed through to the provider
3760
+ as body parameters.
3761
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
3762
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
3763
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
3662
3764
 
3663
- Default and recommended: 'auto' (best mode for the model).
3664
- */
3665
- mode?: 'auto' | 'json' | 'tool';
3666
- }) & {
3667
- output?: OUTPUT;
3765
+ @returns A result object that contains the generated images.
3766
+ */
3767
+ declare function generateImage({ model, prompt, n, maxImagesPerCall, size, aspectRatio, seed, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
3668
3768
  /**
3669
- The language model to use.
3670
- */
3671
- model: LanguageModel;
3769
+ The image model to use.
3770
+ */
3771
+ model: ImageModelV3;
3672
3772
  /**
3673
- A function that attempts to repair the raw output of the model
3674
- to enable JSON parsing.
3773
+ The prompt that should be used to generate the image.
3675
3774
  */
3676
- experimental_repairText?: RepairTextFunction;
3775
+ prompt: string;
3677
3776
  /**
3678
- Optional telemetry configuration (experimental).
3777
+ Number of images to generate.
3679
3778
  */
3680
- experimental_telemetry?: TelemetrySettings;
3779
+ n?: number;
3681
3780
  /**
3682
- Custom download function to use for URLs.
3683
-
3684
- By default, files are downloaded if the model does not support the URL for the given media type.
3781
+ Maximum number of images to generate per provider API call. Larger requests are split across multiple calls.
3685
3782
  */
3686
- experimental_download?: DownloadFunction | undefined;
3687
- /**
3688
- Additional provider-specific options. They are passed through
3689
- to the provider from the AI SDK and enable provider-specific
3690
- functionality that can be fully encapsulated in the provider.
3691
- */
3692
- providerOptions?: ProviderOptions;
3693
- /**
3694
- Callback that is invoked when an error occurs during streaming.
3695
- You can use it to log errors.
3696
- The stream processing will pause until the callback promise is resolved.
3697
- */
3698
- onError?: StreamObjectOnErrorCallback;
3783
+ maxImagesPerCall?: number;
3699
3784
  /**
3700
- Callback that is called when the LLM response and the final object validation are finished.
3701
- */
3702
- onFinish?: StreamObjectOnFinishCallback<RESULT>;
3785
+ Size of the images to generate. Must have the format `{width}x{height}`. If not provided, the default size will be used.
3786
+ */
3787
+ size?: `${number}x${number}`;
3703
3788
  /**
3704
- * Internal. For test use only. May change without notice.
3789
+ Aspect ratio of the images to generate. Must have the format `{width}:{height}`. If not provided, the default aspect ratio will be used.
3705
3790
  */
3706
- _internal?: {
3707
- generateId?: () => string;
3708
- currentDate?: () => Date;
3709
- now?: () => number;
3710
- };
3711
- }): StreamObjectResult<OUTPUT extends 'enum' ? string : OUTPUT extends 'array' ? RESULT : DeepPartial<RESULT>, OUTPUT extends 'array' ? RESULT : RESULT, OUTPUT extends 'array' ? RESULT extends Array<infer U> ? AsyncIterableStream<U> : never : never>;
3712
-
3713
- /**
3714
- * A generated audio file.
3715
- */
3716
- interface GeneratedAudioFile extends GeneratedFile {
3717
- /**
3718
- * Audio format of the file (e.g., 'mp3', 'wav', etc.)
3719
- */
3720
- readonly format: string;
3721
- }
3722
-
3723
- /**
3724
- The result of a `generateSpeech` call.
3725
- It contains the audio data and additional information.
3726
- */
3727
- interface SpeechResult {
3728
- /**
3729
- * The audio data as a base64 encoded string or binary data.
3730
- */
3731
- readonly audio: GeneratedAudioFile;
3732
- /**
3733
- Warnings for the call, e.g. unsupported settings.
3734
- */
3735
- readonly warnings: Array<SpeechWarning>;
3736
- /**
3737
- Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
3738
- */
3739
- readonly responses: Array<SpeechModelResponseMetadata>;
3740
- /**
3741
- Provider metadata from the provider.
3742
- */
3743
- readonly providerMetadata: Record<string, Record<string, JSONValue$1>>;
3744
- }
3745
-
3746
- /**
3747
- Generates speech audio using a speech model.
3748
-
3749
- @param model - The speech model to use.
3750
- @param text - The text to convert to speech.
3751
- @param voice - The voice to use for speech generation.
3752
- @param outputFormat - The output format to use for speech generation e.g. "mp3", "wav", etc.
3753
- @param instructions - Instructions for the speech generation e.g. "Speak in a slow and steady tone".
3754
- @param speed - The speed of the speech generation.
3755
- @param providerOptions - Additional provider-specific options that are passed through to the provider
3756
- as body parameters.
3757
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
3758
- @param abortSignal - An optional abort signal that can be used to cancel the call.
3759
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
3760
-
3761
- @returns A result object that contains the generated audio data.
3762
- */
3763
- declare function generateSpeech({ model, text, voice, outputFormat, instructions, speed, language, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
3764
- /**
3765
- The speech model to use.
3766
- */
3767
- model: SpeechModel;
3768
- /**
3769
- The text to convert to speech.
3770
- */
3771
- text: string;
3772
- /**
3773
- The voice to use for speech generation.
3774
- */
3775
- voice?: string;
3776
- /**
3777
- * The desired output format for the audio e.g. "mp3", "wav", etc.
3778
- */
3779
- outputFormat?: 'mp3' | 'wav' | (string & {});
3780
- /**
3781
- Instructions for the speech generation e.g. "Speak in a slow and steady tone".
3782
- */
3783
- instructions?: string;
3784
- /**
3785
- The speed of the speech generation.
3786
- */
3787
- speed?: number;
3791
+ aspectRatio?: `${number}:${number}`;
3788
3792
  /**
3789
- The language for speech generation. This should be an ISO 639-1 language code (e.g. "en", "es", "fr")
3790
- or "auto" for automatic language detection. Provider support varies.
3793
+ Seed for the image generation. If not provided, the default seed will be used.
3791
3794
  */
3792
- language?: string;
3795
+ seed?: number;
3793
3796
  /**
3794
3797
  Additional provider-specific options that are passed through to the provider
3795
3798
  as body parameters.
@@ -3798,13 +3801,15 @@ declare function generateSpeech({ model, text, voice, outputFormat, instructions
3798
3801
  record is keyed by the provider-specific metadata key.
3799
3802
  ```ts
3800
3803
  {
3801
- "openai": {}
3804
+ "openai": {
3805
+ "style": "vivid"
3806
+ }
3802
3807
  }
3803
3808
  ```
3804
3809
  */
3805
3810
  providerOptions?: ProviderOptions;
3806
3811
  /**
3807
- Maximum number of retries per speech model call. Set to 0 to disable retries.
3812
+ Maximum number of retries per image model call. Set to 0 to disable retries.
3808
3813
 
3809
3814
  @default 2
3810
3815
  */
@@ -3818,1086 +3823,1111 @@ declare function generateSpeech({ model, text, voice, outputFormat, instructions
3818
3823
  Only applicable for HTTP-based providers.
3819
3824
  */
3820
3825
  headers?: Record<string, string>;
3821
- }): Promise<SpeechResult>;
3822
-
3823
- type Warning = LanguageModelV3CallWarning | ImageModelV3CallWarning | SpeechModelV3CallWarning | TranscriptionModelV3CallWarning;
3824
- type LogWarningsFunction = (warnings: Warning[]) => void;
3826
+ }): Promise<GenerateImageResult>;
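A sketch of the `generateImage` call declared above. The exported alias (earlier releases exposed it as `experimental_generateImage`), the provider call, and the model id are assumptions to verify against this beta:

```ts
import { experimental_generateImage as generateImage } from 'ai'; // alias name is an assumption
import { openai } from '@ai-sdk/openai';

export async function renderLighthouse() {
  const { image, warnings } = await generateImage({
    model: openai.imageModel('dall-e-3'), // assumed provider + model id
    prompt: 'A watercolor painting of a lighthouse at dawn',
    size: '1024x1024',
    n: 1,
  });
  if (warnings.length > 0) console.warn('image generation warnings:', warnings);
  return image.uint8Array; // GeneratedFile also exposes base64 and mediaType
}
```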
3825
3827
 
3826
3828
  /**
3827
- * Applies default settings for a language model.
3829
+ The result of a `generateObject` call.
3828
3830
  */
3829
- declare function defaultSettingsMiddleware({ settings, }: {
3830
- settings: Partial<{
3831
- maxOutputTokens?: LanguageModelV3CallOptions['maxOutputTokens'];
3832
- temperature?: LanguageModelV3CallOptions['temperature'];
3833
- stopSequences?: LanguageModelV3CallOptions['stopSequences'];
3834
- topP?: LanguageModelV3CallOptions['topP'];
3835
- topK?: LanguageModelV3CallOptions['topK'];
3836
- presencePenalty?: LanguageModelV3CallOptions['presencePenalty'];
3837
- frequencyPenalty?: LanguageModelV3CallOptions['frequencyPenalty'];
3838
- responseFormat?: LanguageModelV3CallOptions['responseFormat'];
3839
- seed?: LanguageModelV3CallOptions['seed'];
3840
- tools?: LanguageModelV3CallOptions['tools'];
3841
- toolChoice?: LanguageModelV3CallOptions['toolChoice'];
3842
- headers?: LanguageModelV3CallOptions['headers'];
3843
- providerOptions?: LanguageModelV3CallOptions['providerOptions'];
3844
- }>;
3845
- }): LanguageModelMiddleware;
3831
+ interface GenerateObjectResult<OBJECT> {
3832
+ /**
3833
+ The generated object (typed according to the schema).
3834
+ */
3835
+ readonly object: OBJECT;
3836
+ /**
3837
+ * The reasoning that was used to generate the object.
3838
+ * Concatenated from all reasoning parts.
3839
+ */
3840
+ readonly reasoning: string | undefined;
3841
+ /**
3842
+ The reason why the generation finished.
3843
+ */
3844
+ readonly finishReason: FinishReason;
3845
+ /**
3846
+ The token usage of the generated text.
3847
+ */
3848
+ readonly usage: LanguageModelUsage;
3849
+ /**
3850
+ Warnings from the model provider (e.g. unsupported settings).
3851
+ */
3852
+ readonly warnings: CallWarning[] | undefined;
3853
+ /**
3854
+ Additional request information.
3855
+ */
3856
+ readonly request: LanguageModelRequestMetadata;
3857
+ /**
3858
+ Additional response information.
3859
+ */
3860
+ readonly response: LanguageModelResponseMetadata & {
3861
+ /**
3862
+ Response body (available only for providers that use HTTP requests).
3863
+ */
3864
+ body?: unknown;
3865
+ };
3866
+ /**
3867
+ Additional provider-specific metadata. They are passed through
3868
+ from the provider to the AI SDK and enable provider-specific
3869
+ results that can be fully encapsulated in the provider.
3870
+ */
3871
+ readonly providerMetadata: ProviderMetadata | undefined;
3872
+ /**
3873
+ Converts the object to a JSON response.
3874
+ The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
3875
+ */
3876
+ toJsonResponse(init?: ResponseInit): Response;
3877
+ }
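Because `GenerateObjectResult` carries `toJsonResponse`, a route handler can return the generated object without re-serializing it. A minimal sketch; the model id and schema are assumptions:

```ts
import { generateObject } from 'ai';
import { z } from 'zod';

export async function POST(req: Request): Promise<Response> {
  const { prompt } = await req.json();
  const result = await generateObject({
    model: 'openai/gpt-4o-mini', // assumed gateway model id
    schema: z.object({ title: z.string(), bulletPoints: z.array(z.string()) }),
    prompt,
  });
  // 200 response with content-type application/json; charset=utf-8
  return result.toJsonResponse();
}
```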
3846
3878
 
3847
3879
  /**
3848
- * Extract an XML-tagged reasoning section from the generated text and exposes it
3849
- * as a `reasoning` property on the result.
3850
- *
3851
- * @param tagName - The name of the XML tag to extract reasoning from.
3852
- * @param separator - The separator to use between reasoning and text sections.
3853
- * @param startWithReasoning - Whether to start with reasoning tokens.
3854
- */
3855
- declare function extractReasoningMiddleware({ tagName, separator, startWithReasoning, }: {
3856
- tagName: string;
3857
- separator?: string;
3858
- startWithReasoning?: boolean;
3859
- }): LanguageModelMiddleware;
3880
+ A function that attempts to repair the raw output of the model
3881
+ to enable JSON parsing.
3860
3882
 
3861
- /**
3862
- * Simulates streaming chunks with the response from a generate call.
3863
- */
3864
- declare function simulateStreamingMiddleware(): LanguageModelMiddleware;
3883
+ Should return the repaired text or null if the text cannot be repaired.
3884
+ */
3885
+ type RepairTextFunction = (options: {
3886
+ text: string;
3887
+ error: JSONParseError | TypeValidationError;
3888
+ }) => Promise<string | null>;
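As an illustration (not part of the SDK), a repair function could strip Markdown code fences that some models wrap around their JSON output, and otherwise give up by returning `null`:

```ts
// Matches the RepairTextFunction shape declared above.
const stripCodeFences = async ({ text }: { text: string; error: unknown }) => {
  const match = text.match(/`{3}(?:json)?\s*([\s\S]*?)\s*`{3}/);
  return match ? match[1] : null; // null signals "cannot be repaired"
};

// Sketch: pass it to generateObject / streamObject as `experimental_repairText`.
```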
3865
3889
 
3866
3890
  /**
3867
- * Wraps a LanguageModelV3 instance with middleware functionality.
3868
- * This function allows you to apply middleware to transform parameters,
3869
- * wrap generate operations, and wrap stream operations of a language model.
3870
- *
3871
- * @param options - Configuration options for wrapping the language model.
3872
- * @param options.model - The original LanguageModelV3 instance to be wrapped.
3873
- * @param options.middleware - The middleware to be applied to the language model. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.
3874
- * @param options.modelId - Optional custom model ID to override the original model's ID.
3875
- * @param options.providerId - Optional custom provider ID to override the original model's provider ID.
3876
- * @returns A new LanguageModelV3 instance with middleware applied.
3877
- */
3878
- declare const wrapLanguageModel: ({ model, middleware: middlewareArg, modelId, providerId, }: {
3879
- model: LanguageModelV3;
3880
- middleware: LanguageModelMiddleware | LanguageModelMiddleware[];
3881
- modelId?: string;
3882
- providerId?: string;
3883
- }) => LanguageModelV3;
3891
+ Generate a structured, typed object for a given prompt and schema using a language model.
3884
3892
 
3885
- /**
3886
- * Wraps a ProviderV3 instance with middleware functionality.
3887
- * This function allows you to apply middleware to all language models
3888
- * from the provider, enabling you to transform parameters, wrap generate
3889
- * operations, and wrap stream operations for every language model.
3890
- *
3891
- * @param options - Configuration options for wrapping the provider.
3892
- * @param options.provider - The original ProviderV3 instance to be wrapped.
3893
- * @param options.languageModelMiddleware - The middleware to be applied to all language models from the provider. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.
3894
- * @returns A new ProviderV3 instance with middleware applied to all language models.
3895
- */
3896
- declare function wrapProvider({ provider, languageModelMiddleware, }: {
3897
- provider: ProviderV3 | ProviderV2;
3898
- languageModelMiddleware: LanguageModelMiddleware | LanguageModelMiddleware[];
3899
- }): ProviderV3;
3893
+ This function does not stream the output. If you want to stream the output, use `streamObject` instead.
3900
3894
 
3901
- /**
3902
- * Creates a custom provider with specified language models, text embedding models, image models, transcription models, speech models, and an optional fallback provider.
3903
- *
3904
- * @param {Object} options - The options for creating the custom provider.
3905
- * @param {Record<string, LanguageModel>} [options.languageModels] - A record of language models, where keys are model IDs and values are LanguageModel instances.
3906
- * @param {Record<string, EmbeddingModel<string>>} [options.textEmbeddingModels] - A record of text embedding models, where keys are model IDs and values are EmbeddingModel<string> instances.
3907
- * @param {Record<string, ImageModel>} [options.imageModels] - A record of image models, where keys are model IDs and values are ImageModel instances.
3908
- * @param {Record<string, TranscriptionModel>} [options.transcriptionModels] - A record of transcription models, where keys are model IDs and values are TranscriptionModel instances.
3909
- * @param {Record<string, SpeechModel>} [options.speechModels] - A record of speech models, where keys are model IDs and values are SpeechModel instances.
3910
- * @param {Provider} [options.fallbackProvider] - An optional fallback provider to use when a requested model is not found in the custom provider.
3911
- * @returns {Provider} A Provider object with languageModel, textEmbeddingModel, imageModel, transcriptionModel, and speechModel methods.
3912
- *
3913
- * @throws {NoSuchModelError} Throws when a requested model is not found and no fallback provider is available.
3914
- */
3915
- declare function customProvider<LANGUAGE_MODELS extends Record<string, LanguageModelV3>, EMBEDDING_MODELS extends Record<string, EmbeddingModelV3<string>>, IMAGE_MODELS extends Record<string, ImageModelV3>, TRANSCRIPTION_MODELS extends Record<string, TranscriptionModelV3>, SPEECH_MODELS extends Record<string, SpeechModelV3>>({ languageModels, textEmbeddingModels, imageModels, transcriptionModels, speechModels, fallbackProvider, }: {
3916
- languageModels?: LANGUAGE_MODELS;
3917
- textEmbeddingModels?: EMBEDDING_MODELS;
3918
- imageModels?: IMAGE_MODELS;
3919
- transcriptionModels?: TRANSCRIPTION_MODELS;
3920
- speechModels?: SPEECH_MODELS;
3921
- fallbackProvider?: ProviderV3 | ProviderV2;
3922
- }): ProviderV3 & {
3923
- languageModel(modelId: ExtractModelId<LANGUAGE_MODELS>): LanguageModelV3;
3924
- textEmbeddingModel(modelId: ExtractModelId<EMBEDDING_MODELS>): EmbeddingModelV3<string>;
3925
- imageModel(modelId: ExtractModelId<IMAGE_MODELS>): ImageModelV3;
3926
- transcriptionModel(modelId: ExtractModelId<TRANSCRIPTION_MODELS>): TranscriptionModelV3;
3927
- speechModel(modelId: ExtractModelId<SPEECH_MODELS>): SpeechModelV3;
3928
- };
3929
- /**
3930
- * @deprecated Use `customProvider` instead.
3931
- */
3932
- declare const experimental_customProvider: typeof customProvider;
3933
- type ExtractModelId<MODELS extends Record<string, unknown>> = Extract<keyof MODELS, string>;
3895
+ @param model - The language model to use.
3896
+ @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
3934
3897
 
3935
- declare const symbol: unique symbol;
3936
- declare class NoSuchProviderError extends NoSuchModelError {
3937
- private readonly [symbol];
3938
- readonly providerId: string;
3939
- readonly availableProviders: string[];
3940
- constructor({ modelId, modelType, providerId, availableProviders, message, }: {
3941
- modelId: string;
3942
- modelType: 'languageModel' | 'textEmbeddingModel' | 'imageModel' | 'transcriptionModel' | 'speechModel';
3943
- providerId: string;
3944
- availableProviders: string[];
3945
- message?: string;
3946
- });
3947
- static isInstance(error: unknown): error is NoSuchProviderError;
3948
- }
3898
+ @param system - A system message that will be part of the prompt.
3899
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
3900
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
3949
3901
 
3950
- type ExtractLiteralUnion<T> = T extends string ? string extends T ? never : T : never;
3951
- interface ProviderRegistryProvider<PROVIDERS extends Record<string, ProviderV3> = Record<string, ProviderV3>, SEPARATOR extends string = ':'> {
3952
- languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['languageModel']>>[0]>}` : never): LanguageModelV3;
3953
- languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): LanguageModelV3;
3954
- textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['textEmbeddingModel']>>[0]>}` : never): EmbeddingModelV3<string>;
3955
- textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): EmbeddingModelV3<string>;
3956
- imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['imageModel']>>[0]>}` : never): ImageModelV3;
3957
- imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): ImageModelV3;
3958
- transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['transcriptionModel']>>[0]>}` : never): TranscriptionModelV3;
3959
- transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): TranscriptionModelV3;
3960
- speechModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['speechModel']>>[0]>}` : never): SpeechModelV3;
3961
- speechModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): SpeechModelV3;
3962
- }
3963
- /**
3964
- * Creates a registry for the given providers with optional middleware functionality.
3965
- * This function allows you to register multiple providers and optionally apply middleware
3966
- * to all language models from the registry, enabling you to transform parameters, wrap generate
3967
- * operations, and wrap stream operations for every language model accessed through the registry.
3968
- *
3969
- * @param providers - A record of provider instances to be registered in the registry.
3970
- * @param options - Configuration options for the provider registry.
3971
- * @param options.separator - The separator used between provider ID and model ID in the combined identifier. Defaults to ':'.
3972
- * @param options.languageModelMiddleware - Optional middleware to be applied to all language models from the registry. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.
3973
- * @returns A new ProviderRegistryProvider instance that provides access to all registered providers with optional middleware applied to language models.
3974
- */
3975
- declare function createProviderRegistry<PROVIDERS extends Record<string, ProviderV3>, SEPARATOR extends string = ':'>(providers: PROVIDERS, { separator, languageModelMiddleware, }?: {
3976
- separator?: SEPARATOR;
3977
- languageModelMiddleware?: LanguageModelMiddleware | LanguageModelMiddleware[];
3978
- }): ProviderRegistryProvider<PROVIDERS, SEPARATOR>;
3979
- /**
3980
- * @deprecated Use `createProviderRegistry` instead.
3981
- */
3982
- declare const experimental_createProviderRegistry: typeof createProviderRegistry;
3902
+ @param maxOutputTokens - Maximum number of tokens to generate.
3903
+ @param temperature - Temperature setting.
3904
+ The value is passed through to the provider. The range depends on the provider and model.
3905
+ It is recommended to set either `temperature` or `topP`, but not both.
3906
+ @param topP - Nucleus sampling.
3907
+ The value is passed through to the provider. The range depends on the provider and model.
3908
+ It is recommended to set either `temperature` or `topP`, but not both.
3909
+ @param topK - Only sample from the top K options for each subsequent token.
3910
+ Used to remove "long tail" low probability responses.
3911
+ Recommended for advanced use cases only. You usually only need to use temperature.
3912
+ @param presencePenalty - Presence penalty setting.
3913
+ It affects the likelihood of the model repeating information that is already in the prompt.
3914
+ The value is passed through to the provider. The range depends on the provider and model.
3915
+ @param frequencyPenalty - Frequency penalty setting.
3916
+ It affects the likelihood of the model repeatedly using the same words or phrases.
3917
+ The value is passed through to the provider. The range depends on the provider and model.
3918
+ @param stopSequences - Stop sequences.
3919
+ If set, the model will stop generating text when one of the stop sequences is generated.
3920
+ @param seed - The seed (integer) to use for random sampling.
3921
+ If set and supported by the model, calls will generate deterministic results.
3983
3922
 
3984
- declare function createTextStreamResponse({ status, statusText, headers, textStream, }: ResponseInit & {
3985
- textStream: ReadableStream<string>;
3986
- }): Response;
3923
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
3924
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
3925
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
3987
3926
 
3988
- declare function pipeTextStreamToResponse({ response, status, statusText, headers, textStream, }: {
3989
- response: ServerResponse;
3990
- textStream: ReadableStream<string>;
3991
- } & ResponseInit): void;
3927
+ @param schema - The schema of the object that the model should generate.
3928
+ @param schemaName - Optional name of the output that should be generated.
3929
+ Used by some providers for additional LLM guidance, e.g.
3930
+ via tool or schema name.
3931
+ @param schemaDescription - Optional description of the output that should be generated.
3932
+ Used by some providers for additional LLM guidance, e.g.
3933
+ via tool or schema description.
3992
3934
 
3993
- declare const JSONRPCRequestSchema: z.ZodObject<{
3994
- jsonrpc: z.ZodLiteral<"2.0">;
3995
- id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
3996
- method: z.ZodString;
3997
- params: z.ZodOptional<z.ZodObject<{
3998
- _meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
3999
- }, z.core.$loose>>;
4000
- }, z.core.$strict>;
4001
- type JSONRPCRequest = z.infer<typeof JSONRPCRequestSchema>;
4002
- declare const JSONRPCResponseSchema: z.ZodObject<{
4003
- jsonrpc: z.ZodLiteral<"2.0">;
4004
- id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
4005
- result: z.ZodObject<{
4006
- _meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
4007
- }, z.core.$loose>;
4008
- }, z.core.$strict>;
4009
- type JSONRPCResponse = z.infer<typeof JSONRPCResponseSchema>;
4010
- declare const JSONRPCErrorSchema: z.ZodObject<{
4011
- jsonrpc: z.ZodLiteral<"2.0">;
4012
- id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
4013
- error: z.ZodObject<{
4014
- code: z.ZodNumber;
4015
- message: z.ZodString;
4016
- data: z.ZodOptional<z.ZodUnknown>;
4017
- }, z.core.$strip>;
4018
- }, z.core.$strict>;
4019
- type JSONRPCError = z.infer<typeof JSONRPCErrorSchema>;
4020
- declare const JSONRPCNotificationSchema: z.ZodObject<{
4021
- jsonrpc: z.ZodLiteral<"2.0">;
4022
- method: z.ZodString;
4023
- params: z.ZodOptional<z.ZodObject<{
4024
- _meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
4025
- }, z.core.$loose>>;
4026
- }, z.core.$strict>;
4027
- type JSONRPCNotification = z.infer<typeof JSONRPCNotificationSchema>;
4028
- declare const JSONRPCMessageSchema: z.ZodUnion<readonly [z.ZodObject<{
4029
- jsonrpc: z.ZodLiteral<"2.0">;
4030
- id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
4031
- method: z.ZodString;
4032
- params: z.ZodOptional<z.ZodObject<{
4033
- _meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
4034
- }, z.core.$loose>>;
4035
- }, z.core.$strict>, z.ZodObject<{
4036
- jsonrpc: z.ZodLiteral<"2.0">;
4037
- method: z.ZodString;
4038
- params: z.ZodOptional<z.ZodObject<{
4039
- _meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
4040
- }, z.core.$loose>>;
4041
- }, z.core.$strict>, z.ZodObject<{
4042
- jsonrpc: z.ZodLiteral<"2.0">;
4043
- id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
4044
- result: z.ZodObject<{
4045
- _meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
4046
- }, z.core.$loose>;
4047
- }, z.core.$strict>, z.ZodObject<{
4048
- jsonrpc: z.ZodLiteral<"2.0">;
4049
- id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
4050
- error: z.ZodObject<{
4051
- code: z.ZodNumber;
4052
- message: z.ZodString;
4053
- data: z.ZodOptional<z.ZodUnknown>;
4054
- }, z.core.$strip>;
4055
- }, z.core.$strict>]>;
4056
- type JSONRPCMessage = z.infer<typeof JSONRPCMessageSchema>;
3935
+ @param output - The type of the output.
4057
3936
 
4058
- /**
4059
- * Transport interface for MCP (Model Context Protocol) communication.
4060
- * Maps to the `Transport` interface in the MCP spec.
3937
+ - 'object': The output is an object.
3938
+ - 'array': The output is an array.
3939
+ - 'enum': The output is an enum.
3940
+ - 'no-schema': No schema is used; the output is untyped JSON.
3941
+
3942
+ @param experimental_repairText - A function that attempts to repair the raw output of the model
3943
+ to enable JSON parsing.
3944
+
3945
+ @param experimental_telemetry - Optional telemetry configuration (experimental).
3946
+
3947
+ @param providerOptions - Additional provider-specific options. They are passed through
3948
+ to the provider from the AI SDK and enable provider-specific
3949
+ functionality that can be fully encapsulated in the provider.
3950
+
3951
+ @returns
3952
+ A result object that contains the generated object, the finish reason, the token usage, and additional information.
4061
3953
  */
4062
- interface MCPTransport {
3954
+ declare function generateObject<SCHEMA extends FlexibleSchema<unknown> = FlexibleSchema<JSONValue$1>, OUTPUT extends 'object' | 'array' | 'enum' | 'no-schema' = InferSchema<SCHEMA> extends string ? 'enum' : 'object', RESULT = OUTPUT extends 'array' ? Array<InferSchema<SCHEMA>> : InferSchema<SCHEMA>>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (OUTPUT extends 'enum' ? {
4063
3955
  /**
4064
- * Initialize and start the transport
4065
- */
4066
- start(): Promise<void>;
3956
+ The enum values that the model should use.
3957
+ */
3958
+ enum: Array<RESULT>;
3959
+ mode?: 'json';
3960
+ output: 'enum';
3961
+ } : OUTPUT extends 'no-schema' ? {} : {
4067
3962
  /**
4068
- * Send a JSON-RPC message through the transport
4069
- * @param message The JSON-RPC message to send
4070
- */
4071
- send(message: JSONRPCMessage): Promise<void>;
3963
+ The schema of the object that the model should generate.
3964
+ */
3965
+ schema: SCHEMA;
4072
3966
  /**
4073
- * Clean up and close the transport
4074
- */
4075
- close(): Promise<void>;
3967
+ Optional name of the output that should be generated.
3968
+ Used by some providers for additional LLM guidance, e.g.
3969
+ via tool or schema name.
3970
+ */
3971
+ schemaName?: string;
4076
3972
  /**
4077
- * Event handler for transport closure
4078
- */
4079
- onclose?: () => void;
3973
+ Optional description of the output that should be generated.
3974
+ Used by some providers for additional LLM guidance, e.g.
3975
+ via tool or schema description.
3976
+ */
3977
+ schemaDescription?: string;
4080
3978
  /**
4081
- * Event handler for transport errors
3979
+ The mode to use for object generation.
3980
+
3981
+ The schema is converted into a JSON schema and used in one of the following ways
3982
+
3983
+ - 'auto': The provider will choose the best mode for the model.
3984
+ - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
3985
+ - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
3986
+
3987
+ Please note that most providers do not support all modes.
3988
+
3989
+ Default and recommended: 'auto' (best mode for the model).
3990
+ */
3991
+ mode?: 'auto' | 'json' | 'tool';
3992
+ }) & {
3993
+ output?: OUTPUT;
3994
+ /**
3995
+ The language model to use.
4082
3996
  */
4083
- onerror?: (error: Error) => void;
3997
+ model: LanguageModel;
4084
3998
  /**
4085
- * Event handler for received messages
3999
+ A function that attempts to repair the raw output of the model
4000
+ to enable JSON parsing.
4086
4001
  */
4087
- onmessage?: (message: JSONRPCMessage) => void;
4088
- }
4089
- type MCPTransportConfig = {
4090
- type: 'sse';
4002
+ experimental_repairText?: RepairTextFunction;
4091
4003
  /**
4092
- * The URL of the MCP server.
4004
+ Optional telemetry configuration (experimental).
4005
+ */
4006
+ experimental_telemetry?: TelemetrySettings;
4007
+ /**
4008
+ Custom download function to use for URLs.
4009
+
4010
+ By default, files are downloaded if the model does not support the URL for the given media type.
4093
4011
  */
4094
- url: string;
4012
+ experimental_download?: DownloadFunction | undefined;
4095
4013
  /**
4096
- * Additional HTTP headers to be sent with requests.
4014
+ Additional provider-specific options. They are passed through
4015
+ to the provider from the AI SDK and enable provider-specific
4016
+ functionality that can be fully encapsulated in the provider.
4017
+ */
4018
+ providerOptions?: ProviderOptions;
4019
+ /**
4020
+ * Internal. For test use only. May change without notice.
4097
4021
  */
4098
- headers?: Record<string, string>;
4099
- };
4100
-
4101
- type ToolSchemas = Record<string, {
4102
- inputSchema: FlexibleSchema<JSONObject | unknown>;
4103
- }> | 'automatic' | undefined;
4104
- type McpToolSet<TOOL_SCHEMAS extends ToolSchemas = 'automatic'> = TOOL_SCHEMAS extends Record<string, {
4105
- inputSchema: FlexibleSchema<any>;
4106
- }> ? {
4107
- [K in keyof TOOL_SCHEMAS]: TOOL_SCHEMAS[K] extends {
4108
- inputSchema: FlexibleSchema<infer INPUT>;
4109
- } ? Tool<INPUT, CallToolResult> & Required<Pick<Tool<INPUT, CallToolResult>, 'execute'>> : never;
4110
- } : McpToolSet<Record<string, {
4111
- inputSchema: FlexibleSchema<unknown>;
4112
- }>>;
4113
- declare const CallToolResultSchema: z.ZodUnion<[z.ZodObject<{
4114
- _meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
4115
- content: z.ZodArray<z.ZodUnion<readonly [z.ZodObject<{
4116
- type: z.ZodLiteral<"text">;
4117
- text: z.ZodString;
4118
- }, z.core.$loose>, z.ZodObject<{
4119
- type: z.ZodLiteral<"image">;
4120
- data: z.ZodBase64;
4121
- mimeType: z.ZodString;
4122
- }, z.core.$loose>, z.ZodObject<{
4123
- type: z.ZodLiteral<"resource">;
4124
- resource: z.ZodUnion<readonly [z.ZodObject<{
4125
- uri: z.ZodString;
4126
- mimeType: z.ZodOptional<z.ZodString>;
4127
- text: z.ZodString;
4128
- }, z.core.$loose>, z.ZodObject<{
4129
- uri: z.ZodString;
4130
- mimeType: z.ZodOptional<z.ZodString>;
4131
- blob: z.ZodBase64;
4132
- }, z.core.$loose>]>;
4133
- }, z.core.$loose>]>>;
4134
- isError: z.ZodOptional<z.ZodDefault<z.ZodBoolean>>;
4135
- }, z.core.$loose>, z.ZodObject<{
4136
- _meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
4137
- toolResult: z.ZodUnknown;
4138
- }, z.core.$loose>]>;
4139
- type CallToolResult = z.infer<typeof CallToolResultSchema>;
4022
+ _internal?: {
4023
+ generateId?: () => string;
4024
+ currentDate?: () => Date;
4025
+ };
4026
+ }): Promise<GenerateObjectResult<RESULT>>;
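Two minimal call sketches, assuming zod for schemas and any `LanguageModel` instance; the model, prompts, and field names are placeholders:

```ts
import { generateObject, type LanguageModel } from 'ai';
import { z } from 'zod'; // assumed peer dependency

// Default 'object' output: the result is typed by the schema.
async function extractRecipe(model: LanguageModel, text: string) {
  const { object } = await generateObject({
    model,
    schema: z.object({ name: z.string(), ingredients: z.array(z.string()) }),
    prompt: `Extract the recipe from: ${text}`,
  });
  return object; // { name: string; ingredients: string[] }
}

// 'enum' output: the model must pick one of the provided values.
async function classifySentiment(model: LanguageModel, review: string) {
  const { object } = await generateObject({
    model,
    output: 'enum',
    enum: ['positive', 'neutral', 'negative'],
    prompt: `Classify the sentiment of this review: ${review}`,
  });
  return object; // one of the provided enum values
}
```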
4140
4027
 
4141
- interface MCPClientConfig {
4142
- /** Transport configuration for connecting to the MCP server */
4143
- transport: MCPTransportConfig | MCPTransport;
4144
- /** Optional callback for uncaught errors */
4145
- onUncaughtError?: (error: unknown) => void;
4146
- /** Optional client name, defaults to 'ai-sdk-mcp-client' */
4147
- name?: string;
4148
- }
4149
- declare function createMCPClient(config: MCPClientConfig): Promise<MCPClient>;
4150
- interface MCPClient {
4151
- tools<TOOL_SCHEMAS extends ToolSchemas = 'automatic'>(options?: {
4152
- schemas?: TOOL_SCHEMAS;
4153
- }): Promise<McpToolSet<TOOL_SCHEMAS>>;
4154
- close: () => Promise<void>;
4028
+ /**
4029
+ * Consumes a ReadableStream until it's fully read.
4030
+ *
4031
+ * This function reads the stream chunk by chunk until the stream is exhausted.
4032
+ * It doesn't process or return the data from the stream; it simply ensures
4033
+ * that the entire stream is read.
4034
+ *
4035
+ * @param {ReadableStream} stream - The ReadableStream to be consumed.
4036
+ * @returns {Promise<void>} A promise that resolves when the stream is fully consumed.
4037
+ */
4038
+ declare function consumeStream({ stream, onError, }: {
4039
+ stream: ReadableStream;
4040
+ onError?: (error: unknown) => void;
4041
+ }): Promise<void>;
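A small sketch of draining a stream without using its chunks, for example to make sure upstream completion logic runs even when the consumer does not read the body:

```ts
import { consumeStream } from 'ai';

const stream = new ReadableStream<string>({
  start(controller) {
    controller.enqueue('chunk');
    controller.close();
  },
});

// Reads the stream to completion, discarding chunks; errors go to onError.
await consumeStream({
  stream,
  onError: error => console.error('stream error:', error),
});
```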
4042
+
4043
+ /**
4044
+ * Calculates the cosine similarity between two vectors. This is a useful metric for
4045
+ * comparing the similarity of two vectors such as embeddings.
4046
+ *
4047
+ * @param vector1 - The first vector.
4048
+ * @param vector2 - The second vector.
4049
+ *
4050
+ * @returns The cosine similarity between vector1 and vector2.
4051
+ * @returns 0 if either vector is the zero vector.
4052
+ *
4053
+ * @throws {InvalidArgumentError} If the vectors do not have the same length.
4054
+ */
4055
+ declare function cosineSimilarity(vector1: number[], vector2: number[]): number;
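For example, with made-up embedding vectors (real values would come from an embedding model):

```ts
import { cosineSimilarity } from 'ai';

const a = [0.1, 0.9, 0.2];
const b = [0.12, 0.87, 0.25];

console.log(cosineSimilarity(a, b));           // close to 1: very similar direction
console.log(cosineSimilarity(a, [0, 0, 0]));   // 0: zero-vector short circuit
console.log(cosineSimilarity([1, 0], [0, 1])); // 0: orthogonal vectors
// cosineSimilarity([1, 2], [1, 2, 3]) would throw InvalidArgumentError (length mismatch)
```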
4056
+
4057
+ /**
4058
+ * Converts a data URL of type text/* to a text string.
4059
+ */
4060
+ declare function getTextFromDataUrl(dataUrl: string): string;
4061
+
4062
+ /**
4063
+ * Performs a deep-equal comparison of two parsed JSON objects.
4064
+ *
4065
+ * @param {any} obj1 - The first object to compare.
4066
+ * @param {any} obj2 - The second object to compare.
4067
+ * @returns {boolean} - Returns true if the two objects are deeply equal, false otherwise.
4068
+ */
4069
+ declare function isDeepEqualData(obj1: any, obj2: any): boolean;
4070
+
4071
+ declare function parsePartialJson(jsonText: string | undefined): Promise<{
4072
+ value: JSONValue$1 | undefined;
4073
+ state: 'undefined-input' | 'successful-parse' | 'repaired-parse' | 'failed-parse';
4074
+ }>;
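A sketch of how these two helpers behave, assuming both are re-exported from the package entry point as the declarations suggest; `parsePartialJson` is fed a truncated chunk like one that might arrive mid-stream:

```ts
import { isDeepEqualData, parsePartialJson } from 'ai';

const { state, value } = await parsePartialJson('{"name": "Ada", "tags": ["ma');
console.log(state); // 'repaired-parse' – the truncated JSON was closed off
console.log(value); // roughly { name: 'Ada', tags: ['ma'] }

console.log(isDeepEqualData({ a: [1, 2] }, { a: [1, 2] })); // true (structural equality)
console.log(isDeepEqualData({ a: [1, 2] }, { a: [2, 1] })); // false
```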
4075
+
4076
+ type Job = () => Promise<void>;
4077
+
4078
+ declare class SerialJobExecutor {
4079
+ private queue;
4080
+ private isProcessing;
4081
+ private processQueue;
4082
+ run(job: Job): Promise<void>;
4155
4083
  }
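A sketch of serializing async jobs so they never overlap, assuming the class is re-exported from the package entry point; submission order is preserved even when jobs are enqueued concurrently:

```ts
import { SerialJobExecutor } from 'ai';

const executor = new SerialJobExecutor();
const order: number[] = [];

await Promise.all(
  [1, 2, 3].map(n =>
    executor.run(async () => {
      await new Promise(resolve => setTimeout(resolve, 10)); // simulated async work
      order.push(n);
    }),
  ),
);

console.log(order); // [1, 2, 3] – jobs ran one at a time, in submission order
```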
4156
4084
 
4157
4085
  /**
4158
- The result of a `transcribe` call.
4159
- It contains the transcript and additional information.
4086
+ * Creates a ReadableStream that emits the provided values with an optional delay between each value.
4087
+ *
4088
+ * @param options - The configuration options
4089
+ * @param options.chunks - Array of values to be emitted by the stream
4090
+ * @param options.initialDelayInMs - Optional initial delay in milliseconds before emitting the first value (default: 0). Can be set to `null` to skip the initial delay entirely: `null` emits the first value without scheduling any delay, while `0` still schedules an asynchronous delay of 0 milliseconds.
4091
+ * @param options.chunkDelayInMs - Optional delay in milliseconds between emitting each value (default: 0). Can be set to `null` to skip the delay entirely: `null` emits subsequent values without scheduling any delay, while `0` still schedules an asynchronous delay of 0 milliseconds.
4092
+ * @returns A ReadableStream that emits the provided values
4160
4093
  */
4161
- interface TranscriptionResult {
4094
+ declare function simulateReadableStream<T>({ chunks, initialDelayInMs, chunkDelayInMs, _internal, }: {
4095
+ chunks: T[];
4096
+ initialDelayInMs?: number | null;
4097
+ chunkDelayInMs?: number | null;
4098
+ _internal?: {
4099
+ delay?: (ms: number | null) => Promise<void>;
4100
+ };
4101
+ }): ReadableStream<T>;
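A sketch of building a mock stream for tests; the chunks and delays are arbitrary:

```ts
import { simulateReadableStream } from 'ai';

const stream = simulateReadableStream({
  chunks: ['Hello', ' ', 'world'],
  initialDelayInMs: 100, // delay before the first chunk
  chunkDelayInMs: 50,    // delay between subsequent chunks
});

const reader = stream.getReader();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  process.stdout.write(value);
}
```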
4102
+
4103
+ /**
4104
+ The result of a `streamObject` call that contains the partial object stream and additional information.
4105
+ */
4106
+ interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
4162
4107
  /**
4163
- * The complete transcribed text from the audio.
4164
- */
4165
- readonly text: string;
4108
+ Warnings from the model provider (e.g. unsupported settings)
4109
+ */
4110
+ readonly warnings: Promise<CallWarning[] | undefined>;
4166
4111
  /**
4167
- * Array of transcript segments with timing information.
4168
- * Each segment represents a portion of the transcribed text with start and end times.
4169
- */
4170
- readonly segments: Array<{
4171
- /**
4172
- * The text content of this segment.
4173
- */
4174
- readonly text: string;
4175
- /**
4176
- * The start time of this segment in seconds.
4177
- */
4178
- readonly startSecond: number;
4179
- /**
4180
- * The end time of this segment in seconds.
4181
- */
4182
- readonly endSecond: number;
4183
- }>;
4112
+ The token usage of the generated response. Resolved when the response is finished.
4113
+ */
4114
+ readonly usage: Promise<LanguageModelUsage>;
4184
4115
  /**
4185
- * The detected language of the audio content, as an ISO-639-1 code (e.g., 'en' for English).
4186
- * May be undefined if the language couldn't be detected.
4116
+ Additional provider-specific metadata. They are passed through
4117
+ from the provider to the AI SDK and enable provider-specific
4118
+ results that can be fully encapsulated in the provider.
4187
4119
  */
4188
- readonly language: string | undefined;
4120
+ readonly providerMetadata: Promise<ProviderMetadata | undefined>;
4189
4121
  /**
4190
- * The total duration of the audio file in seconds.
4191
- * May be undefined if the duration couldn't be determined.
4192
- */
4193
- readonly durationInSeconds: number | undefined;
4122
+ Additional request information from the last step.
4123
+ */
4124
+ readonly request: Promise<LanguageModelRequestMetadata>;
4194
4125
  /**
4195
- Warnings for the call, e.g. unsupported settings.
4126
+ Additional response information.
4127
+ */
4128
+ readonly response: Promise<LanguageModelResponseMetadata>;
4129
+ /**
4130
+ The reason why the generation finished. Taken from the last step.
4131
+
4132
+ Resolved when the response is finished.
4196
4133
  */
4197
- readonly warnings: Array<TranscriptionWarning>;
4134
+ readonly finishReason: Promise<FinishReason>;
4198
4135
  /**
4199
- Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
4200
- */
4201
- readonly responses: Array<TranscriptionModelResponseMetadata>;
4136
+ The generated object (typed according to the schema). Resolved when the response is finished.
4137
+ */
4138
+ readonly object: Promise<RESULT>;
4202
4139
  /**
4203
- Provider metadata from the provider.
4140
+ Stream of partial objects. It gets more complete as the stream progresses.
4141
+
4142
+ Note that the partial object is not validated.
4143
+ If you want to be certain that the actual content matches your schema, you need to implement your own validation for partial results.
4144
+ */
4145
+ readonly partialObjectStream: AsyncIterableStream<PARTIAL>;
4146
+ /**
4147
+ * Stream over complete array elements. Only available if the output strategy is set to `array`.
4204
4148
  */
4205
- readonly providerMetadata: Record<string, Record<string, JSONValue$1>>;
4206
- }
4207
-
4208
- /**
4209
- Generates transcripts using a transcription model.
4210
-
4211
- @param model - The transcription model to use.
4212
- @param audio - The audio data to transcribe as DataContent (string | Uint8Array | ArrayBuffer | Buffer) or a URL.
4213
- @param providerOptions - Additional provider-specific options that are passed through to the provider
4214
- as body parameters.
4215
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
4216
- @param abortSignal - An optional abort signal that can be used to cancel the call.
4217
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
4218
-
4219
- @returns A result object that contains the generated transcript.
4220
- */
4221
- declare function transcribe({ model, audio, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
4149
+ readonly elementStream: ELEMENT_STREAM;
4222
4150
  /**
4223
- The transcription model to use.
4151
+ Text stream of the JSON representation of the generated object. It contains text chunks.
4152
+ When the stream is finished, the object is valid JSON that can be parsed.
4224
4153
  */
4225
- model: TranscriptionModel;
4154
+ readonly textStream: AsyncIterableStream<string>;
4226
4155
  /**
4227
- The audio data to transcribe.
4228
- */
4229
- audio: DataContent | URL;
4156
+ Stream of different types of events, including partial objects, errors, and finish events.
4157
+ Only errors that stop the stream, such as network errors, are thrown.
4158
+ */
4159
+ readonly fullStream: AsyncIterableStream<ObjectStreamPart<PARTIAL>>;
4230
4160
  /**
4231
- Additional provider-specific options that are passed through to the provider
4232
- as body parameters.
4161
+ Writes text delta output to a Node.js response-like object.
4162
+ It sets a `Content-Type` header to `text/plain; charset=utf-8` and
4163
+ writes each text delta as a separate chunk.
4233
4164
 
4234
- The outer record is keyed by the provider name, and the inner
4235
- record is keyed by the provider-specific metadata key.
4236
- ```ts
4237
- {
4238
- "openai": {
4239
- "temperature": 0
4240
- }
4241
- }
4242
- ```
4165
+ @param response A Node.js response-like object (ServerResponse).
4166
+ @param init Optional headers, status code, and status text.
4243
4167
  */
4244
- providerOptions?: ProviderOptions;
4168
+ pipeTextStreamToResponse(response: ServerResponse$1, init?: ResponseInit): void;
4245
4169
  /**
4246
- Maximum number of retries per transcript model call. Set to 0 to disable retries.
4170
+ Creates a simple text stream response.
4171
+ The response has a `Content-Type` header set to `text/plain; charset=utf-8`.
4172
+ Each text delta is encoded as UTF-8 and sent as a separate chunk.
4173
+ Non-text-delta events are ignored.
4247
4174
 
4248
- @default 2
4249
- */
4250
- maxRetries?: number;
4175
+ @param init Optional headers, status code, and status text.
4176
+ */
4177
+ toTextStreamResponse(init?: ResponseInit): Response;
4178
+ }
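A sketch of consuming `partialObjectStream` and the final `object` promise; the model, schema, and prompt are placeholders, and the full call signature is documented under `streamObject` below:

```ts
import { streamObject, type LanguageModel } from 'ai';
import { z } from 'zod'; // assumed peer dependency

async function streamProfile(model: LanguageModel) {
  const result = streamObject({
    model,
    schema: z.object({ name: z.string(), bio: z.string() }),
    prompt: 'Invent a short user profile.',
  });

  for await (const partial of result.partialObjectStream) {
    console.log(partial); // progressively more complete, not yet validated
  }

  return await result.object; // final object, validated against the schema
}
```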
4179
+ type ObjectStreamPart<PARTIAL> = {
4180
+ type: 'object';
4181
+ object: PARTIAL;
4182
+ } | {
4183
+ type: 'text-delta';
4184
+ textDelta: string;
4185
+ } | {
4186
+ type: 'error';
4187
+ error: unknown;
4188
+ } | {
4189
+ type: 'finish';
4190
+ finishReason: FinishReason;
4191
+ usage: LanguageModelUsage;
4192
+ response: LanguageModelResponseMetadata;
4193
+ providerMetadata?: ProviderMetadata;
4194
+ };
4195
+
4196
+ /**
4197
+ Callback that is set using the `onError` option.
4198
+
4199
+ @param event - The event that is passed to the callback.
4200
+ */
4201
+ type StreamObjectOnErrorCallback = (event: {
4202
+ error: unknown;
4203
+ }) => Promise<void> | void;
4204
+ /**
4205
+ Callback that is set using the `onFinish` option.
4206
+
4207
+ @param event - The event that is passed to the callback.
4208
+ */
4209
+ type StreamObjectOnFinishCallback<RESULT> = (event: {
4251
4210
  /**
4252
- Abort signal.
4253
- */
4254
- abortSignal?: AbortSignal;
4211
+ The token usage of the generated response.
4212
+ */
4213
+ usage: LanguageModelUsage;
4255
4214
  /**
4256
- Additional headers to include in the request.
4257
- Only applicable for HTTP-based providers.
4215
+ The generated object. Can be undefined if the final object does not match the schema.
4216
+ */
4217
+ object: RESULT | undefined;
4218
+ /**
4219
+ Optional error object. This is e.g. a TypeValidationError when the final object does not match the schema.
4220
+ */
4221
+ error: unknown | undefined;
4222
+ /**
4223
+ Response metadata.
4258
4224
  */
4259
- headers?: Record<string, string>;
4260
- }): Promise<TranscriptionResult>;
4225
+ response: LanguageModelResponseMetadata;
4226
+ /**
4227
+ Warnings from the model provider (e.g. unsupported settings).
4228
+ */
4229
+ warnings?: CallWarning[];
4230
+ /**
4231
+ Additional provider-specific metadata. They are passed through
4232
+ to the provider from the AI SDK and enable provider-specific
4233
+ functionality that can be fully encapsulated in the provider.
4234
+ */
4235
+ providerMetadata: ProviderMetadata | undefined;
4236
+ }) => Promise<void> | void;
4237
+ /**
4238
+ Generate a structured, typed object for a given prompt and schema using a language model.
4261
4239
 
4262
- declare const getOriginalFetch: () => typeof fetch;
4263
- declare function callCompletionApi({ api, prompt, credentials, headers, body, streamProtocol, setCompletion, setLoading, setError, setAbortController, onFinish, onError, fetch, }: {
4264
- api: string;
4265
- prompt: string;
4266
- credentials: RequestCredentials | undefined;
4267
- headers: HeadersInit | undefined;
4268
- body: Record<string, any>;
4269
- streamProtocol: 'data' | 'text' | undefined;
4270
- setCompletion: (completion: string) => void;
4271
- setLoading: (loading: boolean) => void;
4272
- setError: (error: Error | undefined) => void;
4273
- setAbortController: (abortController: AbortController | null) => void;
4274
- onFinish: ((prompt: string, completion: string) => void) | undefined;
4275
- onError: ((error: Error) => void) | undefined;
4276
- fetch: ReturnType<typeof getOriginalFetch> | undefined;
4277
- }): Promise<string | null | undefined>;
4240
+ This function streams the output. If you do not want to stream the output, use `generateObject` instead.
4278
4241
 
4279
- interface UIMessageStreamWriter<UI_MESSAGE extends UIMessage = UIMessage> {
4242
+ @param model - The language model to use.
4243
+ @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
4244
+
4245
+ @param system - A system message that will be part of the prompt.
4246
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
4247
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
4248
+
4249
+ @param maxOutputTokens - Maximum number of tokens to generate.
4250
+ @param temperature - Temperature setting.
4251
+ The value is passed through to the provider. The range depends on the provider and model.
4252
+ It is recommended to set either `temperature` or `topP`, but not both.
4253
+ @param topP - Nucleus sampling.
4254
+ The value is passed through to the provider. The range depends on the provider and model.
4255
+ It is recommended to set either `temperature` or `topP`, but not both.
4256
+ @param topK - Only sample from the top K options for each subsequent token.
4257
+ Used to remove "long tail" low probability responses.
4258
+ Recommended for advanced use cases only. You usually only need to use temperature.
4259
+ @param presencePenalty - Presence penalty setting.
4260
+ It affects the likelihood of the model to repeat information that is already in the prompt.
4261
+ The value is passed through to the provider. The range depends on the provider and model.
4262
+ @param frequencyPenalty - Frequency penalty setting.
4263
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
4264
+ The value is passed through to the provider. The range depends on the provider and model.
4265
+ @param stopSequences - Stop sequences.
4266
+ If set, the model will stop generating text when one of the stop sequences is generated.
4267
+ @param seed - The seed (integer) to use for random sampling.
4268
+ If set and supported by the model, calls will generate deterministic results.
4269
+
4270
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
4271
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
4272
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
4273
+
4274
+ @param schema - The schema of the object that the model should generate.
4275
+ @param schemaName - Optional name of the output that should be generated.
4276
+ Used by some providers for additional LLM guidance, e.g.
4277
+ via tool or schema name.
4278
+ @param schemaDescription - Optional description of the output that should be generated.
4279
+ Used by some providers for additional LLM guidance, e.g.
4280
+ via tool or schema description.
4281
+
4282
+ @param output - The type of the output.
4283
+
4284
+ - 'object': The output is an object.
4285
+ - 'array': The output is an array.
4286
+ - 'enum': The output is an enum.
4287
+ - 'no-schema': The output is not a schema.
4288
+
4289
+ @param experimental_telemetry - Optional telemetry configuration (experimental).
4290
+
4291
+ @param providerOptions - Additional provider-specific options. They are passed through
4292
+ to the provider from the AI SDK and enable provider-specific
4293
+ functionality that can be fully encapsulated in the provider.
4294
+
4295
+ @returns
4296
+ A result object for accessing the partial object stream and additional information.
4297
+ */
4298
+ declare function streamObject<SCHEMA extends FlexibleSchema<unknown> = FlexibleSchema<JSONValue$1>, OUTPUT extends 'object' | 'array' | 'enum' | 'no-schema' = InferSchema<SCHEMA> extends string ? 'enum' : 'object', RESULT = OUTPUT extends 'array' ? Array<InferSchema<SCHEMA>> : InferSchema<SCHEMA>>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (OUTPUT extends 'enum' ? {
4280
4299
  /**
4281
- * Appends a data stream part to the stream.
4282
- */
4283
- write(part: InferUIMessageChunk<UI_MESSAGE>): void;
4300
+ The enum values that the model should use.
4301
+ */
4302
+ enum: Array<RESULT>;
4303
+ mode?: 'json';
4304
+ output: 'enum';
4305
+ } : OUTPUT extends 'no-schema' ? {} : {
4284
4306
  /**
4285
- * Merges the contents of another stream to this stream.
4307
+ The schema of the object that the model should generate.
4308
+ */
4309
+ schema: SCHEMA;
4310
+ /**
4311
+ Optional name of the output that should be generated.
4312
+ Used by some providers for additional LLM guidance, e.g.
4313
+ via tool or schema name.
4314
+ */
4315
+ schemaName?: string;
4316
+ /**
4317
+ Optional description of the output that should be generated.
4318
+ Used by some providers for additional LLM guidance, e.g.
4319
+ via tool or schema description.
4320
+ */
4321
+ schemaDescription?: string;
4322
+ /**
4323
+ The mode to use for object generation.
4324
+
4325
+ The schema is converted into a JSON schema and used in one of the following ways
4326
+
4327
+ - 'auto': The provider will choose the best mode for the model.
4328
+ - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
4329
+ - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
4330
+
4331
+ Please note that most providers do not support all modes.
4332
+
4333
+ Default and recommended: 'auto' (best mode for the model).
4334
+ */
4335
+ mode?: 'auto' | 'json' | 'tool';
4336
+ }) & {
4337
+ output?: OUTPUT;
4338
+ /**
4339
+ The language model to use.
4340
+ */
4341
+ model: LanguageModel;
4342
+ /**
4343
+ A function that attempts to repair the raw output of the model
4344
+ to enable JSON parsing.
4286
4345
  */
4287
- merge(stream: ReadableStream<InferUIMessageChunk<UI_MESSAGE>>): void;
4346
+ experimental_repairText?: RepairTextFunction;
4288
4347
  /**
4289
- * Error handler that is used by the data stream writer.
4290
- * This is intended for forwarding when merging streams
4291
- * to prevent duplicated error masking.
4348
+ Optional telemetry configuration (experimental).
4292
4349
  */
4293
- onError: ErrorHandler | undefined;
4294
- }
4350
+ experimental_telemetry?: TelemetrySettings;
4351
+ /**
4352
+ Custom download function to use for URLs.
4295
4353
 
4296
- declare function createUIMessageStream<UI_MESSAGE extends UIMessage>({ execute, onError, originalMessages, onFinish, generateId, }: {
4297
- execute: (options: {
4298
- writer: UIMessageStreamWriter<UI_MESSAGE>;
4299
- }) => Promise<void> | void;
4300
- onError?: (error: unknown) => string;
4354
+ By default, files are downloaded if the model does not support the URL for the given media type.
4355
+ */
4356
+ experimental_download?: DownloadFunction | undefined;
4301
4357
  /**
4302
- * The original messages. If they are provided, persistence mode is assumed,
4303
- * and a message ID is provided for the response message.
4358
+ Additional provider-specific options. They are passed through
4359
+ to the provider from the AI SDK and enable provider-specific
4360
+ functionality that can be fully encapsulated in the provider.
4361
+ */
4362
+ providerOptions?: ProviderOptions;
4363
+ /**
4364
+ Callback that is invoked when an error occurs during streaming.
4365
+ You can use it to log errors.
4366
+ The stream processing will pause until the callback promise is resolved.
4367
+ */
4368
+ onError?: StreamObjectOnErrorCallback;
4369
+ /**
4370
+ Callback that is called when the LLM response and the final object validation are finished.
4371
+ */
4372
+ onFinish?: StreamObjectOnFinishCallback<RESULT>;
4373
+ /**
4374
+ * Internal. For test use only. May change without notice.
4304
4375
  */
4305
- originalMessages?: UI_MESSAGE[];
4306
- onFinish?: UIMessageStreamOnFinishCallback<UI_MESSAGE>;
4307
- generateId?: IdGenerator;
4308
- }): ReadableStream<InferUIMessageChunk<UI_MESSAGE>>;
4309
-
4310
- declare function createUIMessageStreamResponse({ status, statusText, headers, stream, consumeSseStream, }: UIMessageStreamResponseInit & {
4311
- stream: ReadableStream<UIMessageChunk>;
4312
- }): Response;
4376
+ _internal?: {
4377
+ generateId?: () => string;
4378
+ currentDate?: () => Date;
4379
+ now?: () => number;
4380
+ };
4381
+ }): StreamObjectResult<OUTPUT extends 'enum' ? string : OUTPUT extends 'array' ? RESULT : DeepPartial<RESULT>, OUTPUT extends 'array' ? RESULT : RESULT, OUTPUT extends 'array' ? RESULT extends Array<infer U> ? AsyncIterableStream<U> : never : never>;
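A sketch of the `output: 'array'` strategy, where `elementStream` yields complete elements as they finish; the model, schema, and prompt are placeholders:

```ts
import { streamObject, type LanguageModel } from 'ai';
import { z } from 'zod'; // assumed peer dependency

async function streamHeroes(model: LanguageModel) {
  const { elementStream } = streamObject({
    model,
    output: 'array',
    schema: z.object({ name: z.string(), power: z.string() }), // schema of one element
    prompt: 'Generate three fantasy heroes.',
    onError: ({ error }) => console.error(error), // stream errors are reported here, not thrown
  });

  for await (const hero of elementStream) {
    console.log(hero.name, hero.power); // one complete element at a time
  }
}
```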
4313
4382
 
4314
- declare class JsonToSseTransformStream extends TransformStream<unknown, string> {
4315
- constructor();
4383
+ /**
4384
+ * A generated audio file.
4385
+ */
4386
+ interface GeneratedAudioFile extends GeneratedFile {
4387
+ /**
4388
+ * Audio format of the file (e.g., 'mp3', 'wav', etc.)
4389
+ */
4390
+ readonly format: string;
4316
4391
  }
4317
4392
 
4318
- declare function pipeUIMessageStreamToResponse({ response, status, statusText, headers, stream, consumeSseStream, }: {
4319
- response: ServerResponse;
4320
- stream: ReadableStream<UIMessageChunk>;
4321
- } & UIMessageStreamResponseInit): void;
4322
-
4323
4393
  /**
4324
- * Transforms a stream of `UIMessageChunk`s into an `AsyncIterableStream` of `UIMessage`s.
4325
- *
4326
- * @param options.message - The last assistant message to use as a starting point when the conversation is resumed. Otherwise undefined.
4327
- * @param options.stream - The stream of `UIMessageChunk`s to read.
4328
- * @param options.terminateOnError - Whether to terminate the stream if an error occurs.
4329
- * @param options.onError - A function that is called when an error occurs.
4330
- *
4331
- * @returns An `AsyncIterableStream` of `UIMessage`s. Each stream part is a different state of the same message
4332
- * as it is being completed.
4394
+ The result of a `generateSpeech` call.
4395
+ It contains the audio data and additional information.
4333
4396
  */
4334
- declare function readUIMessageStream<UI_MESSAGE extends UIMessage>({ message, stream, onError, terminateOnError, }: {
4335
- message?: UI_MESSAGE;
4336
- stream: ReadableStream<UIMessageChunk>;
4337
- onError?: (error: unknown) => void;
4338
- terminateOnError?: boolean;
4339
- }): AsyncIterableStream<UI_MESSAGE>;
4340
-
4341
- declare const UI_MESSAGE_STREAM_HEADERS: {
4342
- 'content-type': string;
4343
- 'cache-control': string;
4344
- connection: string;
4345
- 'x-vercel-ai-ui-message-stream': string;
4346
- 'x-accel-buffering': string;
4347
- };
4397
+ interface SpeechResult {
4398
+ /**
4399
+ * The audio data as a base64 encoded string or binary data.
4400
+ */
4401
+ readonly audio: GeneratedAudioFile;
4402
+ /**
4403
+ Warnings for the call, e.g. unsupported settings.
4404
+ */
4405
+ readonly warnings: Array<SpeechWarning>;
4406
+ /**
4407
+ Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
4408
+ */
4409
+ readonly responses: Array<SpeechModelResponseMetadata>;
4410
+ /**
4411
+ Provider metadata from the provider.
4412
+ */
4413
+ readonly providerMetadata: Record<string, Record<string, JSONValue$1>>;
4414
+ }
4348
4415
 
4349
4416
  /**
4350
- * Transport interface for handling chat message communication and streaming.
4351
- *
4352
- * The `ChatTransport` interface provides fine-grained control over how messages
4353
- * are sent to API endpoints and how responses are processed. This enables
4354
- * alternative communication protocols like WebSockets, custom authentication
4355
- * patterns, or specialized backend integrations.
4356
- *
4357
- * @template UI_MESSAGE - The UI message type extending UIMessage
4417
+ Generates speech audio using a speech model.
4418
+
4419
+ @param model - The speech model to use.
4420
+ @param text - The text to convert to speech.
4421
+ @param voice - The voice to use for speech generation.
4422
+ @param outputFormat - The output format to use for speech generation e.g. "mp3", "wav", etc.
4423
+ @param instructions - Instructions for the speech generation e.g. "Speak in a slow and steady tone".
4424
+ @param speed - The speed of the speech generation.
4425
+ @param providerOptions - Additional provider-specific options that are passed through to the provider
4426
+ as body parameters.
4427
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
4428
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
4429
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
4430
+
4431
+ @returns A result object that contains the generated audio data.
4358
4432
  */
4359
- interface ChatTransport<UI_MESSAGE extends UIMessage> {
4433
+ declare function generateSpeech({ model, text, voice, outputFormat, instructions, speed, language, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
4360
4434
  /**
4361
- * Sends messages to the chat API endpoint and returns a streaming response.
4362
- *
4363
- * This method handles both new message submission and message regeneration.
4364
- * It supports real-time streaming of responses through UIMessageChunk events.
4365
- *
4366
- * @param options - Configuration object containing:
4367
- * @param options.trigger - The type of message submission:
4368
- * - `'submit-message'`: Submitting a new user message
4369
- * - `'regenerate-message'`: Regenerating an assistant response
4370
- * @param options.chatId - Unique identifier for the chat session
4371
- * @param options.messageId - ID of the message to regenerate (for regenerate-message trigger) or undefined for new messages
4372
- * @param options.messages - Array of UI messages representing the conversation history
4373
- * @param options.abortSignal - Signal to abort the request if needed
4374
- * @param options.headers - Additional HTTP headers to include in the request
4375
- * @param options.body - Additional JSON properties to include in the request body
4376
- * @param options.metadata - Custom metadata to attach to the request
4377
- *
4378
- * @returns Promise resolving to a ReadableStream of UIMessageChunk objects.
4379
- * The stream emits various chunk types like:
4380
- * - `text-start`, `text-delta`, `text-end`: For streaming text content
4381
- * - `tool-input-start`, `tool-input-delta`, `tool-input-available`: For tool calls
4382
- * - `data-part-start`, `data-part-delta`, `data-part-available`: For data parts
4383
- * - `error`: For error handling
4384
- *
4385
- * @throws Error when the API request fails or response is invalid
4435
+ The speech model to use.
4436
+ */
4437
+ model: SpeechModel;
4438
+ /**
4439
+ The text to convert to speech.
4386
4440
  */
4387
- sendMessages: (options: {
4388
- /** The type of message submission - either new message or regeneration */
4389
- trigger: 'submit-message' | 'regenerate-message';
4390
- /** Unique identifier for the chat session */
4391
- chatId: string;
4392
- /** ID of the message to regenerate, or undefined for new messages */
4393
- messageId: string | undefined;
4394
- /** Array of UI messages representing the conversation history */
4395
- messages: UI_MESSAGE[];
4396
- /** Signal to abort the request if needed */
4397
- abortSignal: AbortSignal | undefined;
4398
- } & ChatRequestOptions) => Promise<ReadableStream<UIMessageChunk>>;
4441
+ text: string;
4399
4442
  /**
4400
- * Reconnects to an existing streaming response for the specified chat session.
4401
- *
4402
- * This method is used to resume streaming when a connection is interrupted
4403
- * or when resuming a chat session. It's particularly useful for maintaining
4404
- * continuity in long-running conversations or recovering from network issues.
4405
- *
4406
- * @param options - Configuration object containing:
4407
- * @param options.chatId - Unique identifier for the chat session to reconnect to
4408
- * @param options.headers - Additional HTTP headers to include in the reconnection request
4409
- * @param options.body - Additional JSON properties to include in the request body
4410
- * @param options.metadata - Custom metadata to attach to the request
4411
- *
4412
- * @returns Promise resolving to:
4413
- * - `ReadableStream<UIMessageChunk>`: If an active stream is found and can be resumed
4414
- * - `null`: If no active stream exists for the specified chat session (e.g., response already completed)
4415
- *
4416
- * @throws Error when the reconnection request fails or response is invalid
4443
+ The voice to use for speech generation.
4417
4444
  */
4418
- reconnectToStream: (options: {
4419
- /** Unique identifier for the chat session to reconnect to */
4420
- chatId: string;
4421
- } & ChatRequestOptions) => Promise<ReadableStream<UIMessageChunk> | null>;
4422
- }
4423
-
4424
- type CreateUIMessage<UI_MESSAGE extends UIMessage> = Omit<UI_MESSAGE, 'id' | 'role'> & {
4425
- id?: UI_MESSAGE['id'];
4426
- role?: UI_MESSAGE['role'];
4427
- };
4428
- type UIDataPartSchemas = Record<string, FlexibleSchema>;
4429
- type UIDataTypesToSchemas<T extends UIDataTypes> = {
4430
- [K in keyof T]: FlexibleSchema<T[K]>;
4431
- };
4432
- type InferUIDataParts<T extends UIDataPartSchemas> = {
4433
- [K in keyof T]: InferSchema<T[K]>;
4434
- };
4435
- type ChatRequestOptions = {
4445
+ voice?: string;
4436
4446
  /**
4437
- Additional headers that should be to be passed to the API endpoint.
4447
+ * The desired output format for the audio e.g. "mp3", "wav", etc.
4438
4448
  */
4439
- headers?: Record<string, string> | Headers;
4449
+ outputFormat?: 'mp3' | 'wav' | (string & {});
4440
4450
  /**
4441
- Additional body JSON properties that should be sent to the API endpoint.
4451
+ Instructions for the speech generation e.g. "Speak in a slow and steady tone".
4452
+ */
4453
+ instructions?: string;
4454
+ /**
4455
+ The speed of the speech generation.
4442
4456
  */
4443
- body?: object;
4444
- metadata?: unknown;
4445
- };
4446
- /**
4447
- * Function that can be called to add a tool approval response to the chat.
4448
- */
4449
- type ChatAddToolApproveResponseFunction = ({ id, approved, reason, }: {
4450
- id: string;
4457
+ speed?: number;
4451
4458
  /**
4452
- * Flag indicating whether the approval was granted or denied.
4459
+ The language for speech generation. This should be an ISO 639-1 language code (e.g. "en", "es", "fr")
4460
+ or "auto" for automatic language detection. Provider support varies.
4453
4461
  */
4454
- approved: boolean;
4462
+ language?: string;
4455
4463
  /**
4456
- * Optional reason for the approval or denial.
4464
+ Additional provider-specific options that are passed through to the provider
4465
+ as body parameters.
4466
+
4467
+ The outer record is keyed by the provider name, and the inner
4468
+ record is keyed by the provider-specific metadata key.
4469
+ ```ts
4470
+ {
4471
+ "openai": {}
4472
+ }
4473
+ ```
4474
+ */
4475
+ providerOptions?: ProviderOptions;
4476
+ /**
4477
+ Maximum number of retries per speech model call. Set to 0 to disable retries.
4478
+
4479
+ @default 2
4457
4480
  */
4458
- reason?: string;
4459
- }) => void | PromiseLike<void>;
4460
- type ChatStatus = 'submitted' | 'streaming' | 'ready' | 'error';
4461
- interface ChatState<UI_MESSAGE extends UIMessage> {
4462
- status: ChatStatus;
4463
- error: Error | undefined;
4464
- messages: UI_MESSAGE[];
4465
- pushMessage: (message: UI_MESSAGE) => void;
4466
- popMessage: () => void;
4467
- replaceMessage: (index: number, message: UI_MESSAGE) => void;
4468
- snapshot: <T>(thing: T) => T;
4469
- }
4470
- type ChatOnErrorCallback = (error: Error) => void;
4471
- type ChatOnToolCallCallback<UI_MESSAGE extends UIMessage = UIMessage> = (options: {
4472
- toolCall: InferUIMessageToolCall<UI_MESSAGE>;
4473
- }) => void | PromiseLike<void>;
4474
- type ChatOnDataCallback<UI_MESSAGE extends UIMessage> = (dataPart: DataUIPart<InferUIMessageData<UI_MESSAGE>>) => void;
4481
+ maxRetries?: number;
4482
+ /**
4483
+ Abort signal.
4484
+ */
4485
+ abortSignal?: AbortSignal;
4486
+ /**
4487
+ Additional headers to include in the request.
4488
+ Only applicable for HTTP-based providers.
4489
+ */
4490
+ headers?: Record<string, string>;
4491
+ }): Promise<SpeechResult>;
4492
+
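Taken together, these options describe a single speech-generation call (exported as `experimental_generateSpeech`). A minimal sketch of passing them; the `model` and `text` fields are not shown in this hunk, and the `@ai-sdk/openai` import, the `openai.speech('tts-1')` helper, and the model id are assumptions:

```ts
import { experimental_generateSpeech as generateSpeech } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

async function main() {
  // `model` and `text` are assumed call options not shown in this hunk.
  const result = await generateSpeech({
    model: openai.speech('tts-1'), // hypothetical model id
    text: 'Hello from the AI SDK.',
    voice: 'alloy', // provider-specific voice name
    outputFormat: 'mp3',
    instructions: 'Speak in a slow and steady tone.',
    speed: 1.0,
    language: 'en',
    maxRetries: 2, // default per the docs above
  });
  // `result` is a SpeechResult (declared elsewhere in this file).
  console.log(result);
}

main().catch(console.error);
```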
4493
+ type Warning = LanguageModelV3CallWarning | ImageModelV3CallWarning | SpeechModelV3CallWarning | TranscriptionModelV3CallWarning;
4494
+ type LogWarningsFunction = (warnings: Warning[]) => void;
4495
+
4475
4496
  /**
4476
- * Function that is called when the assistant response has finished streaming.
4497
+ * Applies default settings for a language model.
4498
+ */
4499
+ declare function defaultSettingsMiddleware({ settings, }: {
4500
+ settings: Partial<{
4501
+ maxOutputTokens?: LanguageModelV3CallOptions['maxOutputTokens'];
4502
+ temperature?: LanguageModelV3CallOptions['temperature'];
4503
+ stopSequences?: LanguageModelV3CallOptions['stopSequences'];
4504
+ topP?: LanguageModelV3CallOptions['topP'];
4505
+ topK?: LanguageModelV3CallOptions['topK'];
4506
+ presencePenalty?: LanguageModelV3CallOptions['presencePenalty'];
4507
+ frequencyPenalty?: LanguageModelV3CallOptions['frequencyPenalty'];
4508
+ responseFormat?: LanguageModelV3CallOptions['responseFormat'];
4509
+ seed?: LanguageModelV3CallOptions['seed'];
4510
+ tools?: LanguageModelV3CallOptions['tools'];
4511
+ toolChoice?: LanguageModelV3CallOptions['toolChoice'];
4512
+ headers?: LanguageModelV3CallOptions['headers'];
4513
+ providerOptions?: LanguageModelV3CallOptions['providerOptions'];
4514
+ }>;
4515
+ }): LanguageModelMiddleware;
4516
+
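A minimal sketch of combining `defaultSettingsMiddleware` with `wrapLanguageModel` (declared further below); the `@ai-sdk/openai` import and the `gpt-4o` model id are assumptions:

```ts
import { defaultSettingsMiddleware, wrapLanguageModel, generateText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

// Every call made through `modelWithDefaults` starts from these settings;
// callers can still override them per request.
const modelWithDefaults = wrapLanguageModel({
  model: openai('gpt-4o'), // hypothetical model id
  middleware: defaultSettingsMiddleware({
    settings: { temperature: 0.2, maxOutputTokens: 1024 },
  }),
});

const { text } = await generateText({
  model: modelWithDefaults,
  prompt: 'Summarize the middleware system in one sentence.',
});
console.log(text);
```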
4517
+ /**
4518
+ * Extracts an XML-tagged reasoning section from the generated text and exposes it
4519
+ * as a `reasoning` property on the result.
4477
4520
  *
4478
- * @param message The assistant message that was streamed.
4479
- * @param messages The full chat history, including the assistant message.
4521
+ * @param tagName - The name of the XML tag to extract reasoning from.
4522
+ * @param separator - The separator to use between reasoning and text sections.
4523
+ * @param startWithReasoning - Whether to start with reasoning tokens.
4524
+ */
4525
+ declare function extractReasoningMiddleware({ tagName, separator, startWithReasoning, }: {
4526
+ tagName: string;
4527
+ separator?: string;
4528
+ startWithReasoning?: boolean;
4529
+ }): LanguageModelMiddleware;
4530
+
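A minimal sketch of extracting `<think>`-tagged reasoning with this middleware; the provider import and model id are assumptions:

```ts
import { extractReasoningMiddleware, wrapLanguageModel, generateText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

// Models that emit their chain of thought inside <think>...</think> tags can be
// wrapped so the tagged section is stripped from the text and surfaced separately.
const reasoningModel = wrapLanguageModel({
  model: openai('o3-mini'), // hypothetical reasoning-style model id
  middleware: extractReasoningMiddleware({ tagName: 'think', separator: '\n' }),
});

const result = await generateText({
  model: reasoningModel,
  prompt: 'What is 17 * 24? Think step by step.',
});
// The extracted reasoning is exposed on the result alongside the final text.
console.log(result.text);
```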
4531
+ /**
4532
+ * Simulates streaming chunks with the response from a generate call.
4533
+ */
4534
+ declare function simulateStreamingMiddleware(): LanguageModelMiddleware;
4535
+
4536
+ /**
4537
+ * Wraps a LanguageModelV3 instance with middleware functionality.
4538
+ * This function allows you to apply middleware to transform parameters,
4539
+ * wrap generate operations, and wrap stream operations of a language model.
4480
4540
  *
4481
- * @param isAbort Indicates whether the request has been aborted.
4482
- * @param isDisconnect Indicates whether the request has been ended by a network error.
4483
- * @param isError Indicates whether the request has been ended by an error.
4541
+ * @param options - Configuration options for wrapping the language model.
4542
+ * @param options.model - The original LanguageModelV3 instance to be wrapped.
4543
+ * @param options.middleware - The middleware to be applied to the language model. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.
4544
+ * @param options.modelId - Optional custom model ID to override the original model's ID.
4545
+ * @param options.providerId - Optional custom provider ID to override the original model's provider ID.
4546
+ * @returns A new LanguageModelV3 instance with middleware applied.
4484
4547
  */
4485
- type ChatOnFinishCallback<UI_MESSAGE extends UIMessage> = (options: {
4486
- message: UI_MESSAGE;
4487
- messages: UI_MESSAGE[];
4488
- isAbort: boolean;
4489
- isDisconnect: boolean;
4490
- isError: boolean;
4491
- }) => void;
4492
- interface ChatInit<UI_MESSAGE extends UIMessage> {
4548
+ declare const wrapLanguageModel: ({ model, middleware: middlewareArg, modelId, providerId, }: {
4549
+ model: LanguageModelV3;
4550
+ middleware: LanguageModelMiddleware | LanguageModelMiddleware[];
4551
+ modelId?: string;
4552
+ providerId?: string;
4553
+ }) => LanguageModelV3;
4554
+
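A minimal sketch of composing several middlewares and overriding the reported model id; the provider import and model ids are assumptions:

```ts
import {
  wrapLanguageModel,
  defaultSettingsMiddleware,
  extractReasoningMiddleware,
} from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

// With an array, the first middleware transforms the input first and the last
// middleware is wrapped directly around the model (per the JSDoc above).
export const wrapped = wrapLanguageModel({
  model: openai('gpt-4o'), // hypothetical model id
  middleware: [
    defaultSettingsMiddleware({ settings: { temperature: 0 } }),
    extractReasoningMiddleware({ tagName: 'think' }),
  ],
  modelId: 'gpt-4o-deterministic', // optional override of the reported model id
});
```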
4555
+ /**
4556
+ * Wraps a ProviderV3 instance with middleware functionality.
4557
+ * This function allows you to apply middleware to all language models
4558
+ * from the provider, enabling you to transform parameters, wrap generate
4559
+ * operations, and wrap stream operations for every language model.
4560
+ *
4561
+ * @param options - Configuration options for wrapping the provider.
4562
+ * @param options.provider - The original ProviderV3 instance to be wrapped.
4563
+ * @param options.languageModelMiddleware - The middleware to be applied to all language models from the provider. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.
4564
+ * @returns A new ProviderV3 instance with middleware applied to all language models.
4565
+ */
4566
+ declare function wrapProvider({ provider, languageModelMiddleware, }: {
4567
+ provider: ProviderV3 | ProviderV2;
4568
+ languageModelMiddleware: LanguageModelMiddleware | LanguageModelMiddleware[];
4569
+ }): ProviderV3;
4570
+
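A minimal sketch of wrapping a whole provider, assuming the installed `@ai-sdk/openai` provider instance is compatible with the `ProviderV3 | ProviderV2` parameter and the model id exists:

```ts
import { wrapProvider, defaultSettingsMiddleware, generateText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

// Apply the same middleware to every language model resolved from the provider.
const cautiousOpenAI = wrapProvider({
  provider: openai,
  languageModelMiddleware: defaultSettingsMiddleware({
    settings: { temperature: 0 },
  }),
});

const { text } = await generateText({
  model: cautiousOpenAI.languageModel('gpt-4o'), // hypothetical model id
  prompt: 'Answer deterministically: what is 2 + 2?',
});
console.log(text);
```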
4571
+ /**
4572
+ * Creates a custom provider with specified language models, text embedding models, image models, transcription models, speech models, and an optional fallback provider.
4573
+ *
4574
+ * @param {Object} options - The options for creating the custom provider.
4575
+ * @param {Record<string, LanguageModel>} [options.languageModels] - A record of language models, where keys are model IDs and values are LanguageModel instances.
4576
+ * @param {Record<string, EmbeddingModel<string>>} [options.textEmbeddingModels] - A record of text embedding models, where keys are model IDs and values are EmbeddingModel<string> instances.
4577
+ * @param {Record<string, ImageModel>} [options.imageModels] - A record of image models, where keys are model IDs and values are ImageModel instances.
4578
+ * @param {Record<string, TranscriptionModel>} [options.transcriptionModels] - A record of transcription models, where keys are model IDs and values are TranscriptionModel instances.
4579
+ * @param {Record<string, SpeechModel>} [options.speechModels] - A record of speech models, where keys are model IDs and values are SpeechModel instances.
4580
+ * @param {Provider} [options.fallbackProvider] - An optional fallback provider to use when a requested model is not found in the custom provider.
4581
+ * @returns {Provider} A Provider object with languageModel, textEmbeddingModel, imageModel, transcriptionModel, and speechModel methods.
4582
+ *
4583
+ * @throws {NoSuchModelError} Throws when a requested model is not found and no fallback provider is available.
4584
+ */
4585
+ declare function customProvider<LANGUAGE_MODELS extends Record<string, LanguageModelV3>, EMBEDDING_MODELS extends Record<string, EmbeddingModelV3<string>>, IMAGE_MODELS extends Record<string, ImageModelV3>, TRANSCRIPTION_MODELS extends Record<string, TranscriptionModelV3>, SPEECH_MODELS extends Record<string, SpeechModelV3>>({ languageModels, textEmbeddingModels, imageModels, transcriptionModels, speechModels, fallbackProvider, }: {
4586
+ languageModels?: LANGUAGE_MODELS;
4587
+ textEmbeddingModels?: EMBEDDING_MODELS;
4588
+ imageModels?: IMAGE_MODELS;
4589
+ transcriptionModels?: TRANSCRIPTION_MODELS;
4590
+ speechModels?: SPEECH_MODELS;
4591
+ fallbackProvider?: ProviderV3 | ProviderV2;
4592
+ }): ProviderV3 & {
4593
+ languageModel(modelId: ExtractModelId<LANGUAGE_MODELS>): LanguageModelV3;
4594
+ textEmbeddingModel(modelId: ExtractModelId<EMBEDDING_MODELS>): EmbeddingModelV3<string>;
4595
+ imageModel(modelId: ExtractModelId<IMAGE_MODELS>): ImageModelV3;
4596
+ transcriptionModel(modelId: ExtractModelId<TRANSCRIPTION_MODELS>): TranscriptionModelV3;
4597
+ speechModel(modelId: ExtractModelId<SPEECH_MODELS>): SpeechModelV3;
4598
+ };
4599
+ /**
4600
+ * @deprecated Use `customProvider` instead.
4601
+ */
4602
+ declare const experimental_customProvider: typeof customProvider;
4603
+ type ExtractModelId<MODELS extends Record<string, unknown>> = Extract<keyof MODELS, string>;
4604
+
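A minimal sketch of a custom provider that aliases model ids and falls back to the original provider for everything else; the provider import and model ids are assumptions:

```ts
import { customProvider } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

// Expose friendly aliases while keeping the original provider as a fallback.
export const myProvider = customProvider({
  languageModels: {
    fast: openai('gpt-4o-mini'), // hypothetical model ids
    smart: openai('gpt-4o'),
  },
  fallbackProvider: openai,
});

// Model ids are type-checked against the aliases above.
const model = myProvider.languageModel('fast');
console.log(model.modelId);
```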
4605
+ declare const symbol: unique symbol;
4606
+ declare class NoSuchProviderError extends NoSuchModelError {
4607
+ private readonly [symbol];
4608
+ readonly providerId: string;
4609
+ readonly availableProviders: string[];
4610
+ constructor({ modelId, modelType, providerId, availableProviders, message, }: {
4611
+ modelId: string;
4612
+ modelType: 'languageModel' | 'textEmbeddingModel' | 'imageModel' | 'transcriptionModel' | 'speechModel';
4613
+ providerId: string;
4614
+ availableProviders: string[];
4615
+ message?: string;
4616
+ });
4617
+ static isInstance(error: unknown): error is NoSuchProviderError;
4618
+ }
4619
+
4620
+ type ExtractLiteralUnion<T> = T extends string ? string extends T ? never : T : never;
4621
+ interface ProviderRegistryProvider<PROVIDERS extends Record<string, ProviderV3> = Record<string, ProviderV3>, SEPARATOR extends string = ':'> {
4622
+ languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['languageModel']>>[0]>}` : never): LanguageModelV3;
4623
+ languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): LanguageModelV3;
4624
+ textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['textEmbeddingModel']>>[0]>}` : never): EmbeddingModelV3<string>;
4625
+ textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): EmbeddingModelV3<string>;
4626
+ imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['imageModel']>>[0]>}` : never): ImageModelV3;
4627
+ imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): ImageModelV3;
4628
+ transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['transcriptionModel']>>[0]>}` : never): TranscriptionModelV3;
4629
+ transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): TranscriptionModelV3;
4630
+ speechModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['speechModel']>>[0]>}` : never): SpeechModelV3;
4631
+ speechModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): SpeechModelV3;
4632
+ }
4633
+ /**
4634
+ * Creates a registry for the given providers with optional middleware functionality.
4635
+ * This function allows you to register multiple providers and optionally apply middleware
4636
+ * to all language models from the registry, enabling you to transform parameters, wrap generate
4637
+ * operations, and wrap stream operations for every language model accessed through the registry.
4638
+ *
4639
+ * @param providers - A record of provider instances to be registered in the registry.
4640
+ * @param options - Configuration options for the provider registry.
4641
+ * @param options.separator - The separator used between provider ID and model ID in the combined identifier. Defaults to ':'.
4642
+ * @param options.languageModelMiddleware - Optional middleware to be applied to all language models from the registry. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.
4643
+ * @returns A new ProviderRegistryProvider instance that provides access to all registered providers with optional middleware applied to language models.
4644
+ */
4645
+ declare function createProviderRegistry<PROVIDERS extends Record<string, ProviderV3>, SEPARATOR extends string = ':'>(providers: PROVIDERS, { separator, languageModelMiddleware, }?: {
4646
+ separator?: SEPARATOR;
4647
+ languageModelMiddleware?: LanguageModelMiddleware | LanguageModelMiddleware[];
4648
+ }): ProviderRegistryProvider<PROVIDERS, SEPARATOR>;
4649
+ /**
4650
+ * @deprecated Use `createProviderRegistry` instead.
4651
+ */
4652
+ declare const experimental_createProviderRegistry: typeof createProviderRegistry;
4653
+
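A minimal sketch of a registry with a custom separator and registry-wide middleware; both provider packages and the model id are assumptions:

```ts
import {
  createProviderRegistry,
  extractReasoningMiddleware,
  generateText,
} from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider packages
import { anthropic } from '@ai-sdk/anthropic';

// Combined ids use `<providerId><separator><modelId>`; the default separator is ':'.
const registry = createProviderRegistry(
  { openai, anthropic },
  {
    separator: ' > ',
    languageModelMiddleware: extractReasoningMiddleware({ tagName: 'think' }),
  },
);

const { text } = await generateText({
  model: registry.languageModel('openai > gpt-4o'), // hypothetical model id
  prompt: 'Hello!',
});
console.log(text);
```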
4654
+ declare function createTextStreamResponse({ status, statusText, headers, textStream, }: ResponseInit & {
4655
+ textStream: ReadableStream<string>;
4656
+ }): Response;
4657
+
4658
+ declare function pipeTextStreamToResponse({ response, status, statusText, headers, textStream, }: {
4659
+ response: ServerResponse;
4660
+ textStream: ReadableStream<string>;
4661
+ } & ResponseInit): void;
4662
+
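A minimal sketch that builds a `ReadableStream<string>` and returns it as a streamed text `Response`; for Node's `ServerResponse`, `pipeTextStreamToResponse` takes the same `textStream` plus the `response` object:

```ts
import { createTextStreamResponse } from 'ai';

// Turn a plain text stream into a web Response, e.g. in a route handler.
function textResponse(): Response {
  const textStream = new ReadableStream<string>({
    start(controller) {
      controller.enqueue('Hello, ');
      controller.enqueue('world!');
      controller.close();
    },
  });

  return createTextStreamResponse({
    status: 200,
    headers: { 'Cache-Control': 'no-store' },
    textStream,
  });
}
```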
4663
+ declare const JSONRPCRequestSchema: z.ZodObject<{
4664
+ jsonrpc: z.ZodLiteral<"2.0">;
4665
+ id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
4666
+ method: z.ZodString;
4667
+ params: z.ZodOptional<z.ZodObject<{
4668
+ _meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
4669
+ }, z.core.$loose>>;
4670
+ }, z.core.$strict>;
4671
+ type JSONRPCRequest = z.infer<typeof JSONRPCRequestSchema>;
4672
+ declare const JSONRPCResponseSchema: z.ZodObject<{
4673
+ jsonrpc: z.ZodLiteral<"2.0">;
4674
+ id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
4675
+ result: z.ZodObject<{
4676
+ _meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
4677
+ }, z.core.$loose>;
4678
+ }, z.core.$strict>;
4679
+ type JSONRPCResponse = z.infer<typeof JSONRPCResponseSchema>;
4680
+ declare const JSONRPCErrorSchema: z.ZodObject<{
4681
+ jsonrpc: z.ZodLiteral<"2.0">;
4682
+ id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
4683
+ error: z.ZodObject<{
4684
+ code: z.ZodNumber;
4685
+ message: z.ZodString;
4686
+ data: z.ZodOptional<z.ZodUnknown>;
4687
+ }, z.core.$strip>;
4688
+ }, z.core.$strict>;
4689
+ type JSONRPCError = z.infer<typeof JSONRPCErrorSchema>;
4690
+ declare const JSONRPCNotificationSchema: z.ZodObject<{
4691
+ jsonrpc: z.ZodLiteral<"2.0">;
4692
+ method: z.ZodString;
4693
+ params: z.ZodOptional<z.ZodObject<{
4694
+ _meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
4695
+ }, z.core.$loose>>;
4696
+ }, z.core.$strict>;
4697
+ type JSONRPCNotification = z.infer<typeof JSONRPCNotificationSchema>;
4698
+ declare const JSONRPCMessageSchema: z.ZodUnion<readonly [z.ZodObject<{
4699
+ jsonrpc: z.ZodLiteral<"2.0">;
4700
+ id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
4701
+ method: z.ZodString;
4702
+ params: z.ZodOptional<z.ZodObject<{
4703
+ _meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
4704
+ }, z.core.$loose>>;
4705
+ }, z.core.$strict>, z.ZodObject<{
4706
+ jsonrpc: z.ZodLiteral<"2.0">;
4707
+ method: z.ZodString;
4708
+ params: z.ZodOptional<z.ZodObject<{
4709
+ _meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
4710
+ }, z.core.$loose>>;
4711
+ }, z.core.$strict>, z.ZodObject<{
4712
+ jsonrpc: z.ZodLiteral<"2.0">;
4713
+ id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
4714
+ result: z.ZodObject<{
4715
+ _meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
4716
+ }, z.core.$loose>;
4717
+ }, z.core.$strict>, z.ZodObject<{
4718
+ jsonrpc: z.ZodLiteral<"2.0">;
4719
+ id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
4720
+ error: z.ZodObject<{
4721
+ code: z.ZodNumber;
4722
+ message: z.ZodString;
4723
+ data: z.ZodOptional<z.ZodUnknown>;
4724
+ }, z.core.$strip>;
4725
+ }, z.core.$strict>]>;
4726
+ type JSONRPCMessage = z.infer<typeof JSONRPCMessageSchema>;
4727
+
4728
+ /**
4729
+ * Transport interface for MCP (Model Context Protocol) communication.
4730
+ * Maps to the `Transport` interface in the MCP spec.
4731
+ */
4732
+ interface MCPTransport {
4493
4733
  /**
4494
- * A unique identifier for the chat. If not provided, a random one will be
4495
- * generated.
4734
+ * Initialize and start the transport
4496
4735
  */
4497
- id?: string;
4498
- messageMetadataSchema?: FlexibleSchema<InferUIMessageMetadata<UI_MESSAGE>>;
4499
- dataPartSchemas?: UIDataTypesToSchemas<InferUIMessageData<UI_MESSAGE>>;
4500
- messages?: UI_MESSAGE[];
4736
+ start(): Promise<void>;
4501
4737
  /**
4502
- * A way to provide a function that is going to be used for ids for messages and the chat.
4503
- * If not provided the default AI SDK `generateId` is used.
4738
+ * Send a JSON-RPC message through the transport
4739
+ * @param message The JSON-RPC message to send
4504
4740
  */
4505
- generateId?: IdGenerator;
4506
- transport?: ChatTransport<UI_MESSAGE>;
4741
+ send(message: JSONRPCMessage): Promise<void>;
4507
4742
  /**
4508
- * Callback function to be called when an error is encountered.
4743
+ * Clean up and close the transport
4509
4744
  */
4510
- onError?: ChatOnErrorCallback;
4511
- /**
4512
- Optional callback function that is invoked when a tool call is received.
4513
- Intended for automatic client-side tool execution.
4514
-
4515
- You can optionally return a result for the tool call,
4516
- either synchronously or asynchronously.
4517
- */
4518
- onToolCall?: ChatOnToolCallCallback<UI_MESSAGE>;
4745
+ close(): Promise<void>;
4519
4746
  /**
4520
- * Function that is called when the assistant response has finished streaming.
4747
+ * Event handler for transport closure
4521
4748
  */
4522
- onFinish?: ChatOnFinishCallback<UI_MESSAGE>;
4749
+ onclose?: () => void;
4523
4750
  /**
4524
- * Optional callback function that is called when a data part is received.
4525
- *
4526
- * @param data The data part that was received.
4751
+ * Event handler for transport errors
4527
4752
  */
4528
- onData?: ChatOnDataCallback<UI_MESSAGE>;
4753
+ onerror?: (error: Error) => void;
4529
4754
  /**
4530
- * When provided, this function will be called when the stream is finished or a tool call is added
4531
- * to determine if the current messages should be resubmitted.
4755
+ * Event handler for received messages
4532
4756
  */
4533
- sendAutomaticallyWhen?: (options: {
4534
- messages: UI_MESSAGE[];
4535
- }) => boolean | PromiseLike<boolean>;
4757
+ onmessage?: (message: JSONRPCMessage) => void;
4536
4758
  }
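A minimal sketch of a custom transport implementing this interface; the in-memory echo is purely illustrative, standing in for a real channel such as WebSocket or stdio:

```ts
import type { MCPTransport, JSONRPCMessage } from 'ai';

// Skeleton transport: wires up the lifecycle and event handlers required by MCPTransport.
class InMemoryTransport implements MCPTransport {
  onclose?: () => void;
  onerror?: (error: Error) => void;
  onmessage?: (message: JSONRPCMessage) => void;

  async start(): Promise<void> {
    // Open the underlying connection here.
  }

  async send(message: JSONRPCMessage): Promise<void> {
    // Deliver the JSON-RPC message to the server; echo a fake response for illustration.
    if ('id' in message && 'method' in message) {
      this.onmessage?.({ jsonrpc: '2.0', id: message.id, result: {} });
    }
  }

  async close(): Promise<void> {
    this.onclose?.();
  }
}
```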
4537
- declare abstract class AbstractChat<UI_MESSAGE extends UIMessage> {
4538
- readonly id: string;
4539
- readonly generateId: IdGenerator;
4540
- protected state: ChatState<UI_MESSAGE>;
4541
- private messageMetadataSchema;
4542
- private dataPartSchemas;
4543
- private readonly transport;
4544
- private onError?;
4545
- private onToolCall?;
4546
- private onFinish?;
4547
- private onData?;
4548
- private sendAutomaticallyWhen?;
4549
- private activeResponse;
4550
- private jobExecutor;
4551
- constructor({ generateId, id, transport, messageMetadataSchema, dataPartSchemas, state, onError, onToolCall, onFinish, onData, sendAutomaticallyWhen, }: Omit<ChatInit<UI_MESSAGE>, 'messages'> & {
4552
- state: ChatState<UI_MESSAGE>;
4553
- });
4554
- /**
4555
- * Hook status:
4556
- *
4557
- * - `submitted`: The message has been sent to the API and we're awaiting the start of the response stream.
4558
- * - `streaming`: The response is actively streaming in from the API, receiving chunks of data.
4559
- * - `ready`: The full response has been received and processed; a new user message can be submitted.
4560
- * - `error`: An error occurred during the API request, preventing successful completion.
4561
- */
4562
- get status(): ChatStatus;
4563
- protected setStatus({ status, error, }: {
4564
- status: ChatStatus;
4565
- error?: Error;
4566
- }): void;
4567
- get error(): Error | undefined;
4568
- get messages(): UI_MESSAGE[];
4569
- get lastMessage(): UI_MESSAGE | undefined;
4570
- set messages(messages: UI_MESSAGE[]);
4571
- /**
4572
- * Appends or replaces a user message to the chat list. This triggers the API call to fetch
4573
- * the assistant's response.
4574
- *
4575
- * If a messageId is provided, the message will be replaced.
4576
- */
4577
- sendMessage: (message?: (CreateUIMessage<UI_MESSAGE> & {
4578
- text?: never;
4579
- files?: never;
4580
- messageId?: string;
4581
- }) | {
4582
- text: string;
4583
- files?: FileList | FileUIPart[];
4584
- metadata?: InferUIMessageMetadata<UI_MESSAGE>;
4585
- parts?: never;
4586
- messageId?: string;
4587
- } | {
4588
- files: FileList | FileUIPart[];
4589
- metadata?: InferUIMessageMetadata<UI_MESSAGE>;
4590
- parts?: never;
4591
- messageId?: string;
4592
- }, options?: ChatRequestOptions) => Promise<void>;
4593
- /**
4594
- * Regenerate the assistant message with the provided message id.
4595
- * If no message id is provided, the last assistant message will be regenerated.
4596
- */
4597
- regenerate: ({ messageId, ...options }?: {
4598
- messageId?: string;
4599
- } & ChatRequestOptions) => Promise<void>;
4600
- /**
4601
- * Attempt to resume an ongoing streaming response.
4602
- */
4603
- resumeStream: (options?: ChatRequestOptions) => Promise<void>;
4759
+ type MCPTransportConfig = {
4760
+ type: 'sse';
4604
4761
  /**
4605
- * Clear the error state and set the status to ready if the chat is in an error state.
4762
+ * The URL of the MCP server.
4606
4763
  */
4607
- clearError: () => void;
4608
- addToolApprovalResponse: ChatAddToolApproveResponseFunction;
4609
- addToolResult: <TOOL extends keyof InferUIMessageTools<UI_MESSAGE>>({ state, tool, toolCallId, output, errorText, }: {
4610
- state?: "output-available";
4611
- tool: TOOL;
4612
- toolCallId: string;
4613
- output: InferUIMessageTools<UI_MESSAGE>[TOOL]["output"];
4614
- errorText?: never;
4615
- } | {
4616
- state: "output-error";
4617
- tool: TOOL;
4618
- toolCallId: string;
4619
- output?: never;
4620
- errorText: string;
4621
- }) => Promise<void>;
4764
+ url: string;
4622
4765
  /**
4623
- * Abort the current request immediately, keep the generated tokens if any.
4766
+ * Additional HTTP headers to be sent with requests.
4624
4767
  */
4625
- stop: () => Promise<void>;
4626
- private makeRequest;
4627
- }
4628
-
4629
- declare function convertFileListToFileUIParts(files: FileList | undefined): Promise<Array<FileUIPart>>;
4630
-
4631
- /**
4632
- Converts an array of UI messages from useChat into an array of ModelMessages that can be used
4633
- with the AI functions (e.g. `streamText`, `generateText`).
4768
+ headers?: Record<string, string>;
4769
+ };
4634
4770
 
4635
- @param messages - The UI messages to convert.
4636
- @param options.tools - The tools to use.
4637
- @param options.ignoreIncompleteToolCalls - Whether to ignore incomplete tool calls. Default is `false`.
4771
+ type ToolSchemas = Record<string, {
4772
+ inputSchema: FlexibleSchema<JSONObject | unknown>;
4773
+ }> | 'automatic' | undefined;
4774
+ type McpToolSet<TOOL_SCHEMAS extends ToolSchemas = 'automatic'> = TOOL_SCHEMAS extends Record<string, {
4775
+ inputSchema: FlexibleSchema<any>;
4776
+ }> ? {
4777
+ [K in keyof TOOL_SCHEMAS]: TOOL_SCHEMAS[K] extends {
4778
+ inputSchema: FlexibleSchema<infer INPUT>;
4779
+ } ? Tool<INPUT, CallToolResult> & Required<Pick<Tool<INPUT, CallToolResult>, 'execute'>> : never;
4780
+ } : McpToolSet<Record<string, {
4781
+ inputSchema: FlexibleSchema<unknown>;
4782
+ }>>;
4783
+ declare const CallToolResultSchema: z.ZodUnion<[z.ZodObject<{
4784
+ _meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
4785
+ content: z.ZodArray<z.ZodUnion<readonly [z.ZodObject<{
4786
+ type: z.ZodLiteral<"text">;
4787
+ text: z.ZodString;
4788
+ }, z.core.$loose>, z.ZodObject<{
4789
+ type: z.ZodLiteral<"image">;
4790
+ data: z.ZodBase64;
4791
+ mimeType: z.ZodString;
4792
+ }, z.core.$loose>, z.ZodObject<{
4793
+ type: z.ZodLiteral<"resource">;
4794
+ resource: z.ZodUnion<readonly [z.ZodObject<{
4795
+ uri: z.ZodString;
4796
+ mimeType: z.ZodOptional<z.ZodString>;
4797
+ text: z.ZodString;
4798
+ }, z.core.$loose>, z.ZodObject<{
4799
+ uri: z.ZodString;
4800
+ mimeType: z.ZodOptional<z.ZodString>;
4801
+ blob: z.ZodBase64;
4802
+ }, z.core.$loose>]>;
4803
+ }, z.core.$loose>]>>;
4804
+ isError: z.ZodOptional<z.ZodDefault<z.ZodBoolean>>;
4805
+ }, z.core.$loose>, z.ZodObject<{
4806
+ _meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
4807
+ toolResult: z.ZodUnknown;
4808
+ }, z.core.$loose>]>;
4809
+ type CallToolResult = z.infer<typeof CallToolResultSchema>;
4638
4810
 
4639
- @returns An array of ModelMessages.
4640
- */
4641
- declare function convertToModelMessages(messages: Array<Omit<UIMessage, 'id'>>, options?: {
4642
- tools?: ToolSet;
4643
- ignoreIncompleteToolCalls?: boolean;
4644
- }): ModelMessage[];
4645
- /**
4646
- @deprecated Use `convertToModelMessages` instead.
4647
- */
4648
- declare const convertToCoreMessages: typeof convertToModelMessages;
4811
+ interface MCPClientConfig {
4812
+ /** Transport configuration for connecting to the MCP server */
4813
+ transport: MCPTransportConfig | MCPTransport;
4814
+ /** Optional callback for uncaught errors */
4815
+ onUncaughtError?: (error: unknown) => void;
4816
+ /** Optional client name, defaults to 'ai-sdk-mcp-client' */
4817
+ name?: string;
4818
+ }
4819
+ declare function createMCPClient(config: MCPClientConfig): Promise<MCPClient>;
4820
+ interface MCPClient {
4821
+ tools<TOOL_SCHEMAS extends ToolSchemas = 'automatic'>(options?: {
4822
+ schemas?: TOOL_SCHEMAS;
4823
+ }): Promise<McpToolSet<TOOL_SCHEMAS>>;
4824
+ close: () => Promise<void>;
4825
+ }
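A minimal sketch of connecting over the SSE transport config and handing the discovered tools to `generateText`; the server URL, auth header, provider import, and model id are assumptions:

```ts
import { experimental_createMCPClient as createMCPClient, generateText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

async function main() {
  const mcpClient = await createMCPClient({
    transport: {
      type: 'sse',
      url: 'https://example.com/mcp/sse', // hypothetical server URL
      headers: { Authorization: 'Bearer <token>' }, // optional extra headers
    },
  });

  try {
    // With no schemas option, tool input schemas are discovered from the server ('automatic').
    const tools = await mcpClient.tools();

    const { text } = await generateText({
      model: openai('gpt-4o'), // hypothetical model id
      tools,
      prompt: 'Use the available tools to answer: what is the server status?',
    });
    console.log(text);
  } finally {
    await mcpClient.close();
  }
}

main().catch(console.error);
```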
4649
4826
 
4650
- type PrepareSendMessagesRequest<UI_MESSAGE extends UIMessage> = (options: {
4651
- id: string;
4652
- messages: UI_MESSAGE[];
4653
- requestMetadata: unknown;
4654
- body: Record<string, any> | undefined;
4655
- credentials: RequestCredentials | undefined;
4656
- headers: HeadersInit | undefined;
4657
- api: string;
4658
- } & {
4659
- trigger: 'submit-message' | 'regenerate-message';
4660
- messageId: string | undefined;
4661
- }) => {
4662
- body: object;
4663
- headers?: HeadersInit;
4664
- credentials?: RequestCredentials;
4665
- api?: string;
4666
- } | PromiseLike<{
4667
- body: object;
4668
- headers?: HeadersInit;
4669
- credentials?: RequestCredentials;
4670
- api?: string;
4671
- }>;
4672
- type PrepareReconnectToStreamRequest = (options: {
4673
- id: string;
4674
- requestMetadata: unknown;
4675
- body: Record<string, any> | undefined;
4676
- credentials: RequestCredentials | undefined;
4677
- headers: HeadersInit | undefined;
4678
- api: string;
4679
- }) => {
4680
- headers?: HeadersInit;
4681
- credentials?: RequestCredentials;
4682
- api?: string;
4683
- } | PromiseLike<{
4684
- headers?: HeadersInit;
4685
- credentials?: RequestCredentials;
4686
- api?: string;
4687
- }>;
4688
4827
  /**
4689
- * Options for the `HttpChatTransport` class.
4690
- *
4691
- * @param UI_MESSAGE - The type of message to be used in the chat.
4828
+ The result of a `transcribe` call.
4829
+ It contains the transcript and additional information.
4692
4830
  */
4693
- type HttpChatTransportInitOptions<UI_MESSAGE extends UIMessage> = {
4831
+ interface TranscriptionResult {
4694
4832
  /**
4695
- * The API URL to be used for the chat transport.
4696
- * Defaults to '/api/chat'.
4833
+ * The complete transcribed text from the audio.
4697
4834
  */
4698
- api?: string;
4835
+ readonly text: string;
4699
4836
  /**
4700
- * The credentials mode to be used for the fetch request.
4701
- * Possible values are: 'omit', 'same-origin', 'include'.
4702
- * Defaults to 'same-origin'.
4837
+ * Array of transcript segments with timing information.
4838
+ * Each segment represents a portion of the transcribed text with start and end times.
4703
4839
  */
4704
- credentials?: Resolvable<RequestCredentials>;
4840
+ readonly segments: Array<{
4841
+ /**
4842
+ * The text content of this segment.
4843
+ */
4844
+ readonly text: string;
4845
+ /**
4846
+ * The start time of this segment in seconds.
4847
+ */
4848
+ readonly startSecond: number;
4849
+ /**
4850
+ * The end time of this segment in seconds.
4851
+ */
4852
+ readonly endSecond: number;
4853
+ }>;
4705
4854
  /**
4706
- * HTTP headers to be sent with the API request.
4855
+ * The detected language of the audio content, as an ISO-639-1 code (e.g., 'en' for English).
4856
+ * May be undefined if the language couldn't be detected.
4707
4857
  */
4708
- headers?: Resolvable<Record<string, string> | Headers>;
4858
+ readonly language: string | undefined;
4709
4859
  /**
4710
- * Extra body object to be sent with the API request.
4711
- * @example
4712
- * Send a `sessionId` to the API along with the messages.
4713
- * ```js
4714
- * useChat({
4715
- * body: {
4716
- * sessionId: '123',
4717
- * }
4718
- * })
4719
- * ```
4860
+ * The total duration of the audio file in seconds.
4861
+ * May be undefined if the duration couldn't be determined.
4720
4862
  */
4721
- body?: Resolvable<object>;
4863
+ readonly durationInSeconds: number | undefined;
4722
4864
  /**
4723
- Custom fetch implementation. You can use it as a middleware to intercept requests,
4724
- or to provide a custom fetch implementation for e.g. testing.
4725
- */
4726
- fetch?: FetchFunction;
4865
+ Warnings for the call, e.g. unsupported settings.
4866
+ */
4867
+ readonly warnings: Array<TranscriptionWarning>;
4727
4868
  /**
4728
- * When a function is provided, it will be used
4729
- * to prepare the request body for the chat API. This can be useful for
4730
- * customizing the request body based on the messages and data in the chat.
4731
- *
4732
- * @param id The id of the chat.
4733
- * @param messages The current messages in the chat.
4734
- * @param requestBody The request body object passed in the chat request.
4869
+ Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
4735
4870
  */
4736
- prepareSendMessagesRequest?: PrepareSendMessagesRequest<UI_MESSAGE>;
4871
+ readonly responses: Array<TranscriptionModelResponseMetadata>;
4737
4872
  /**
4738
- * When a function is provided, it will be used
4739
- * to prepare the request body for the chat API. This can be useful for
4740
- * customizing the request body based on the messages and data in the chat.
4741
- *
4742
- * @param id The id of the chat.
4743
- * @param messages The current messages in the chat.
4744
- * @param requestBody The request body object passed in the chat request.
4873
+ Provider metadata from the provider.
4745
4874
  */
4746
- prepareReconnectToStreamRequest?: PrepareReconnectToStreamRequest;
4747
- };
4748
- declare abstract class HttpChatTransport<UI_MESSAGE extends UIMessage> implements ChatTransport<UI_MESSAGE> {
4749
- protected api: string;
4750
- protected credentials: HttpChatTransportInitOptions<UI_MESSAGE>['credentials'];
4751
- protected headers: HttpChatTransportInitOptions<UI_MESSAGE>['headers'];
4752
- protected body: HttpChatTransportInitOptions<UI_MESSAGE>['body'];
4753
- protected fetch?: FetchFunction;
4754
- protected prepareSendMessagesRequest?: PrepareSendMessagesRequest<UI_MESSAGE>;
4755
- protected prepareReconnectToStreamRequest?: PrepareReconnectToStreamRequest;
4756
- constructor({ api, credentials, headers, body, fetch, prepareSendMessagesRequest, prepareReconnectToStreamRequest, }: HttpChatTransportInitOptions<UI_MESSAGE>);
4757
- sendMessages({ abortSignal, ...options }: Parameters<ChatTransport<UI_MESSAGE>['sendMessages']>[0]): Promise<ReadableStream<UIMessageChunk>>;
4758
- reconnectToStream(options: Parameters<ChatTransport<UI_MESSAGE>['reconnectToStream']>[0]): Promise<ReadableStream<UIMessageChunk> | null>;
4759
- protected abstract processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
4760
- }
4761
-
4762
- declare class DefaultChatTransport<UI_MESSAGE extends UIMessage> extends HttpChatTransport<UI_MESSAGE> {
4763
- constructor(options?: HttpChatTransportInitOptions<UI_MESSAGE>);
4764
- protected processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
4875
+ readonly providerMetadata: Record<string, Record<string, JSONValue$1>>;
4765
4876
  }
4766
4877
 
4767
4878
  /**
4768
- Check if the last message is an assistant message with completed tool call approvals.
4769
- The last step of the message must have at least one tool approval response and
4770
- all tool approvals must have a response.
4771
- */
4772
- declare function lastAssistantMessageIsCompleteWithApprovalResponses({ messages, }: {
4773
- messages: UIMessage[];
4774
- }): boolean;
4775
-
4776
- /**
4777
- Check if the message is an assistant message with completed tool calls.
4778
- The last step of the message must have at least one tool invocation and
4779
- all tool invocations must have a result.
4780
- */
4781
- declare function lastAssistantMessageIsCompleteWithToolCalls({ messages, }: {
4782
- messages: UIMessage[];
4783
- }): boolean;
4879
+ Generates transcripts using a transcription model.
4784
4880
 
4785
- declare class TextStreamChatTransport<UI_MESSAGE extends UIMessage> extends HttpChatTransport<UI_MESSAGE> {
4786
- constructor(options?: HttpChatTransportInitOptions<UI_MESSAGE>);
4787
- protected processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
4788
- }
4881
+ @param model - The transcription model to use.
4882
+ @param audio - The audio data to transcribe as DataContent (string | Uint8Array | ArrayBuffer | Buffer) or a URL.
4883
+ @param providerOptions - Additional provider-specific options that are passed through to the provider
4884
+ as body parameters.
4885
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
4886
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
4887
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
4789
4888
 
4790
- type CompletionRequestOptions = {
4791
- /**
4792
- An optional object of headers to be passed to the API endpoint.
4793
- */
4794
- headers?: Record<string, string> | Headers;
4889
+ @returns A result object that contains the generated transcript.
4890
+ */
4891
+ declare function transcribe({ model, audio, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
4795
4892
  /**
4796
- An optional object to be passed to the API endpoint.
4893
+ The transcription model to use.
4797
4894
  */
4798
- body?: object;
4799
- };
4800
- type UseCompletionOptions = {
4801
- /**
4802
- * The API endpoint that accepts a `{ prompt: string }` object and returns
4803
- * a stream of tokens of the AI completion response. Defaults to `/api/completion`.
4804
- */
4805
- api?: string;
4806
- /**
4807
- * An unique identifier for the chat. If not provided, a random one will be
4808
- * generated. When provided, the `useChat` hook with the same `id` will
4809
- * have shared states across components.
4810
- */
4811
- id?: string;
4812
- /**
4813
- * Initial prompt input of the completion.
4814
- */
4815
- initialInput?: string;
4816
- /**
4817
- * Initial completion result. Useful to load an existing history.
4818
- */
4819
- initialCompletion?: string;
4820
- /**
4821
- * Callback function to be called when the completion is finished streaming.
4822
- */
4823
- onFinish?: (prompt: string, completion: string) => void;
4824
- /**
4825
- * Callback function to be called when an error is encountered.
4826
- */
4827
- onError?: (error: Error) => void;
4895
+ model: TranscriptionModel;
4828
4896
  /**
4829
- * The credentials mode to be used for the fetch request.
4830
- * Possible values are: 'omit', 'same-origin', 'include'.
4831
- * Defaults to 'same-origin'.
4897
+ The audio data to transcribe.
4832
4898
  */
4833
- credentials?: RequestCredentials;
4899
+ audio: DataContent | URL;
4834
4900
  /**
4835
- * HTTP headers to be sent with the API request.
4836
- */
4837
- headers?: Record<string, string> | Headers;
4901
+ Additional provider-specific options that are passed through to the provider
4902
+ as body parameters.
4903
+
4904
+ The outer record is keyed by the provider name, and the inner
4905
+ record is keyed by the provider-specific metadata key.
4906
+ ```ts
4907
+ {
4908
+ "openai": {
4909
+ "temperature": 0
4910
+ }
4911
+ }
4912
+ ```
4913
+ */
4914
+ providerOptions?: ProviderOptions;
4838
4915
  /**
4839
- * Extra body object to be sent with the API request.
4840
- * @example
4841
- * Send a `sessionId` to the API along with the prompt.
4842
- * ```js
4843
- * useChat({
4844
- * body: {
4845
- * sessionId: '123',
4846
- * }
4847
- * })
4848
- * ```
4916
+ Maximum number of retries per transcription model call. Set to 0 to disable retries.
4917
+
4918
+ @default 2
4849
4919
  */
4850
- body?: object;
4920
+ maxRetries?: number;
4851
4921
  /**
4852
- Streaming protocol that is used. Defaults to `data`.
4853
- */
4854
- streamProtocol?: 'data' | 'text';
4922
+ Abort signal.
4923
+ */
4924
+ abortSignal?: AbortSignal;
4855
4925
  /**
4856
- Custom fetch implementation. You can use it as a middleware to intercept requests,
4857
- or to provide a custom fetch implementation for e.g. testing.
4858
- */
4859
- fetch?: FetchFunction;
4860
- };
4861
-
4862
- type SafeValidateUIMessagesResult<UI_MESSAGE extends UIMessage> = {
4863
- success: true;
4864
- data: Array<UI_MESSAGE>;
4865
- } | {
4866
- success: false;
4867
- error: Error;
4868
- };
4869
- /**
4870
- * Validates a list of UI messages like `validateUIMessages`,
4871
- * but instead of throwing it returns `{ success: true, data }`
4872
- * or `{ success: false, error }`.
4873
- */
4874
- declare function safeValidateUIMessages<UI_MESSAGE extends UIMessage>({ messages, metadataSchema, dataSchemas, tools, }: {
4875
- messages: unknown;
4876
- metadataSchema?: FlexibleSchema<UIMessage['metadata']>;
4877
- dataSchemas?: {
4878
- [NAME in keyof InferUIMessageData<UI_MESSAGE> & string]?: FlexibleSchema<InferUIMessageData<UI_MESSAGE>[NAME]>;
4879
- };
4880
- tools?: {
4881
- [NAME in keyof InferUIMessageTools<UI_MESSAGE> & string]?: Tool<InferUIMessageTools<UI_MESSAGE>[NAME]['input'], InferUIMessageTools<UI_MESSAGE>[NAME]['output']>;
4882
- };
4883
- }): Promise<SafeValidateUIMessagesResult<UI_MESSAGE>>;
4884
- /**
4885
- * Validates a list of UI messages.
4886
- *
4887
- * Metadata, data parts, and generic tool call structures are only validated if
4888
- * the corresponding schemas are provided. Otherwise, they are assumed to be
4889
- * valid.
4890
- */
4891
- declare function validateUIMessages<UI_MESSAGE extends UIMessage>({ messages, metadataSchema, dataSchemas, tools, }: {
4892
- messages: unknown;
4893
- metadataSchema?: FlexibleSchema<UIMessage['metadata']>;
4894
- dataSchemas?: {
4895
- [NAME in keyof InferUIMessageData<UI_MESSAGE> & string]?: FlexibleSchema<InferUIMessageData<UI_MESSAGE>[NAME]>;
4896
- };
4897
- tools?: {
4898
- [NAME in keyof InferUIMessageTools<UI_MESSAGE> & string]?: Tool<InferUIMessageTools<UI_MESSAGE>[NAME]['input'], InferUIMessageTools<UI_MESSAGE>[NAME]['output']>;
4899
- };
4900
- }): Promise<Array<UI_MESSAGE>>;
4926
+ Additional headers to include in the request.
4927
+ Only applicable for HTTP-based providers.
4928
+ */
4929
+ headers?: Record<string, string>;
4930
+ }): Promise<TranscriptionResult>;
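A minimal sketch of a transcription call that reads the `TranscriptionResult` fields declared above; the provider import, the `openai.transcription('whisper-1')` helper, and the audio file path are assumptions:

```ts
import { experimental_transcribe as transcribe } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package
import { readFile } from 'node:fs/promises';

async function main() {
  const result = await transcribe({
    model: openai.transcription('whisper-1'), // hypothetical model id
    audio: await readFile('./meeting.mp3'), // DataContent: Buffer / Uint8Array / base64 string / ArrayBuffer
    maxRetries: 2,
  });

  console.log(result.text); // full transcript
  console.log(result.language); // e.g. 'en', if detected
  console.log(result.durationInSeconds);
  for (const segment of result.segments) {
    console.log(`${segment.startSecond}-${segment.endSecond}s: ${segment.text}`);
  }
}

main().catch(console.error);
```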
4901
4931
 
4902
4932
  declare global {
4903
4933
  /**
@@ -4919,4 +4949,4 @@ declare global {
4919
4949
  var AI_SDK_LOG_WARNINGS: LogWarningsFunction | undefined | false;
4920
4950
  }
4921
4951
 
4922
- export { AbstractChat, Agent, AsyncIterableStream, CallSettings, CallWarning, ChatAddToolApproveResponseFunction, ChatInit, ChatOnDataCallback, ChatOnErrorCallback, ChatOnFinishCallback, ChatOnToolCallCallback, ChatRequestOptions, ChatState, ChatStatus, ChatTransport, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataUIPart, DeepPartial, DefaultChatTransport, DownloadError, DynamicToolCall, DynamicToolError, DynamicToolResult, DynamicToolUIPart, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, ErrorHandler, ToolLoopAgent as Experimental_Agent, ToolLoopAgentSettings as Experimental_AgentSettings, DownloadFunction as Experimental_DownloadFunction, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, InferAgentUIMessage as Experimental_InferAgentUIMessage, LogWarningsFunction as Experimental_LogWarningsFunction, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, Warning as Experimental_Warning, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnFinishCallback, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, HttpChatTransport, HttpChatTransportInitOptions, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, InferAgentUIMessage, InferUIDataParts, InferUIMessageChunk, InferUITool, InferUITools, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolInputError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelMiddleware, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputGeneratedError, NoOutputSpecifiedError, NoSpeechGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, PrepareReconnectToStreamRequest, PrepareSendMessagesRequest, PrepareStepFunction, PrepareStepResult, Prompt, Provider, ProviderMetadata, ProviderRegistryProvider, ReasoningOutput, ReasoningUIPart, RepairTextFunction, RetryError, SafeValidateUIMessagesResult, SerialJobExecutor, SourceDocumentUIPart, SourceUrlUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StaticToolCall, StaticToolError, StaticToolOutputDenied, StaticToolResult, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextStreamChatTransport, TextStreamPart, TextUIPart, ToolApprovalRequestOutput, ToolCallRepairError, ToolCallRepairFunction, ToolChoice, ToolLoopAgent, ToolLoopAgentOnFinishCallback, ToolLoopAgentOnStepFinishCallback, ToolLoopAgentSettings, ToolSet, ToolUIPart, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, TypedToolCall, TypedToolError, TypedToolOutputDenied, TypedToolResult, UIDataPartSchemas, UIDataTypes, UIMessage, UIMessageChunk, UIMessagePart, UIMessageStreamOnFinishCallback, UIMessageStreamOptions, UIMessageStreamWriter, UITool, UIToolInvocation, UITools, UI_MESSAGE_STREAM_HEADERS, UnsupportedModelVersionError, 
UseCompletionOptions, assistantModelMessageSchema, callCompletionApi, consumeStream, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createAgentUIStreamResponse, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultSettingsMiddleware, embed, embedMany, MCPClient as experimental_MCPClient, MCPClientConfig as experimental_MCPClientConfig, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractReasoningMiddleware, generateObject, generateText, getTextFromDataUrl, getToolName, getToolOrDynamicToolName, hasToolCall, isDataUIPart, isDeepEqualData, isToolOrDynamicToolUIPart, isToolUIPart, lastAssistantMessageIsCompleteWithApprovalResponses, lastAssistantMessageIsCompleteWithToolCalls, modelMessageSchema, parsePartialJson, pipeTextStreamToResponse, pipeUIMessageStreamToResponse, pruneMessages, readUIMessageStream, safeValidateUIMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, stepCountIs, streamObject, streamText, systemModelMessageSchema, toolModelMessageSchema, uiMessageChunkSchema, userModelMessageSchema, validateUIMessages, wrapLanguageModel, wrapProvider };
4952
+ export { AbstractChat, Agent, AsyncIterableStream, CallSettings, CallWarning, ChatAddToolApproveResponseFunction, ChatInit, ChatOnDataCallback, ChatOnErrorCallback, ChatOnFinishCallback, ChatOnToolCallCallback, ChatRequestOptions, ChatState, ChatStatus, ChatTransport, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataUIPart, DeepPartial, DefaultChatTransport, DownloadError, DynamicToolCall, DynamicToolError, DynamicToolResult, DynamicToolUIPart, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, ErrorHandler, ToolLoopAgent as Experimental_Agent, ToolLoopAgentSettings as Experimental_AgentSettings, DownloadFunction as Experimental_DownloadFunction, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, InferAgentUIMessage as Experimental_InferAgentUIMessage, LogWarningsFunction as Experimental_LogWarningsFunction, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, Warning as Experimental_Warning, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnFinishCallback, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, HttpChatTransport, HttpChatTransportInitOptions, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, InferAgentUIMessage, InferUIDataParts, InferUIMessageChunk, InferUITool, InferUITools, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolInputError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelMiddleware, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputGeneratedError, NoOutputSpecifiedError, NoSpeechGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, PrepareReconnectToStreamRequest, PrepareSendMessagesRequest, PrepareStepFunction, PrepareStepResult, Prompt, Provider, ProviderMetadata, ProviderRegistryProvider, ReasoningOutput, ReasoningUIPart, RepairTextFunction, RetryError, SafeValidateUIMessagesResult, SerialJobExecutor, SourceDocumentUIPart, SourceUrlUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StaticToolCall, StaticToolError, StaticToolOutputDenied, StaticToolResult, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextStreamChatTransport, TextStreamPart, TextUIPart, ToolApprovalRequestOutput, ToolCallRepairError, ToolCallRepairFunction, ToolChoice, ToolLoopAgent, ToolLoopAgentOnFinishCallback, ToolLoopAgentOnStepFinishCallback, ToolLoopAgentSettings, ToolSet, ToolUIPart, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, TypedToolCall, TypedToolError, TypedToolOutputDenied, TypedToolResult, UIDataPartSchemas, UIDataTypes, UIMessage, UIMessageChunk, UIMessagePart, UIMessageStreamOnFinishCallback, UIMessageStreamOptions, UIMessageStreamWriter, UITool, UIToolInvocation, UITools, UI_MESSAGE_STREAM_HEADERS, UnsupportedModelVersionError, 
UseCompletionOptions, assistantModelMessageSchema, callCompletionApi, consumeStream, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createAgentUIStream, createAgentUIStreamResponse, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultSettingsMiddleware, embed, embedMany, MCPClient as experimental_MCPClient, MCPClientConfig as experimental_MCPClientConfig, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractReasoningMiddleware, generateObject, generateText, getTextFromDataUrl, getToolName, getToolOrDynamicToolName, hasToolCall, isDataUIPart, isDeepEqualData, isToolOrDynamicToolUIPart, isToolUIPart, lastAssistantMessageIsCompleteWithApprovalResponses, lastAssistantMessageIsCompleteWithToolCalls, modelMessageSchema, parsePartialJson, pipeAgentUIStreamToResponse, pipeTextStreamToResponse, pipeUIMessageStreamToResponse, pruneMessages, readUIMessageStream, safeValidateUIMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, stepCountIs, streamObject, streamText, systemModelMessageSchema, toolModelMessageSchema, uiMessageChunkSchema, userModelMessageSchema, validateUIMessages, wrapLanguageModel, wrapProvider };