ai 5.0.0-alpha.11 → 5.0.0-alpha.13

This diff shows the changes between these publicly released package versions as they appear in their public registry and is provided for informational purposes only.
package/dist/index.d.ts CHANGED
@@ -1,4 +1,4 @@
1
- import { ToolResultContent, Schema, ToolCall, ToolResult, Validator, StandardSchemaV1, IdGenerator, InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
1
+ import { ToolResultContent, Schema, IdGenerator, Validator, StandardSchemaV1, ToolCall, FetchFunction, InferSchema } from '@ai-sdk/provider-utils';
2
2
  export { IdGenerator, Schema, ToolCall, ToolResult, asSchema, createIdGenerator, generateId, jsonSchema } from '@ai-sdk/provider-utils';
3
3
  import { AISDKError, SharedV2ProviderMetadata, SharedV2ProviderOptions, EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV2, ImageModelV2CallWarning, ImageModelV2ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2CallWarning, LanguageModelV2Source, SpeechModelV1, SpeechModelV1CallWarning, TranscriptionModelV1, TranscriptionModelV1CallWarning, LanguageModelV2Usage, JSONObject, LanguageModelV2ToolCall, JSONSchema7, LanguageModelV2CallOptions, JSONParseError, TypeValidationError, LanguageModelV2Middleware, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
4
4
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
@@ -1974,6 +1974,9 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = ContentPart<TOOLS> | {
1974
1974
  } | {
1975
1975
  type: 'error';
1976
1976
  error: unknown;
1977
+ } | {
1978
+ type: 'raw';
1979
+ rawValue: unknown;
1977
1980
  };
1978
1981
 
1979
1982
  declare const symbol$c: unique symbol;
@@ -2127,29 +2130,34 @@ declare class InvalidMessageRoleError extends AISDKError {
2127
2130
  }
2128
2131
 
2129
2132
  /**
2130
- Tool invocations are either tool calls or tool results. For each assistant tool call,
2131
- there is one tool invocation. While the call is in progress, the invocation is a tool call.
2132
- Once the call is complete, the invocation is a tool result.
2133
+ Create a type from an object with all keys and nested keys set to optional.
2134
+ The helper supports normal objects and Zod schemas (which are resolved automatically).
2135
+ It always recurses into arrays.
2133
2136
 
2134
- The step is used to track how to map an assistant UI message with many tool invocations
2135
- back to a sequence of LLM assistant/tool result message pairs.
2136
- It is optional for backwards compatibility.
2137
+ Adopted from [type-fest](https://github.com/sindresorhus/type-fest/tree/main) PartialDeep.
2137
2138
  */
2138
- type ToolInvocation = ({
2139
- state: 'partial-call';
2140
- } & ToolCall<string, any>) | ({
2141
- state: 'call';
2142
- } & ToolCall<string, any>) | ({
2143
- state: 'result';
2144
- } & ToolResult<string, any, any>);
2139
+ type DeepPartial<T> = T extends z3.ZodTypeAny ? DeepPartialInternal<z3.infer<T>> : T extends z4.$ZodType ? DeepPartialInternal<z4.infer<T>> : DeepPartialInternal<T>;
2140
+ type DeepPartialInternal<T> = T extends null | undefined | string | number | boolean | symbol | bigint | void | Date | RegExp | ((...arguments_: any[]) => unknown) | (new (...arguments_: any[]) => unknown) ? T : T extends Map<infer KeyType, infer ValueType> ? PartialMap<KeyType, ValueType> : T extends Set<infer ItemType> ? PartialSet<ItemType> : T extends ReadonlyMap<infer KeyType, infer ValueType> ? PartialReadonlyMap<KeyType, ValueType> : T extends ReadonlySet<infer ItemType> ? PartialReadonlySet<ItemType> : T extends object ? T extends ReadonlyArray<infer ItemType> ? ItemType[] extends T ? readonly ItemType[] extends T ? ReadonlyArray<DeepPartialInternal<ItemType | undefined>> : Array<DeepPartialInternal<ItemType | undefined>> : PartialObject<T> : PartialObject<T> : unknown;
2141
+ type PartialMap<KeyType, ValueType> = {} & Map<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
2142
+ type PartialSet<T> = {} & Set<DeepPartialInternal<T>>;
2143
+ type PartialReadonlyMap<KeyType, ValueType> = {} & ReadonlyMap<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
2144
+ type PartialReadonlySet<T> = {} & ReadonlySet<DeepPartialInternal<T>>;
2145
+ type PartialObject<ObjectType extends object> = {
2146
+ [KeyType in keyof ObjectType]?: DeepPartialInternal<ObjectType[KeyType]>;
2147
+ };
2148
+
2145
2149
  /**
2146
2150
  The data types that can be used in the UI message for the UI message data parts.
2147
2151
  */
2148
2152
  type UIDataTypes = Record<string, unknown>;
2153
+ type UITools = Record<string, {
2154
+ args: unknown;
2155
+ result: unknown | undefined;
2156
+ }>;
2149
2157
  /**
2150
2158
  AI SDK UI Messages. They are used in the client and to communicate between the frontend and the API routes.
2151
2159
  */
2152
- interface UIMessage<METADATA = unknown, DATA_PARTS extends UIDataTypes = UIDataTypes> {
2160
+ interface UIMessage<METADATA = unknown, DATA_PARTS extends UIDataTypes = UIDataTypes, TOOLS extends UITools = UITools> {
2153
2161
  /**
2154
2162
  A unique identifier for the message.
2155
2163
  */
@@ -2172,20 +2180,9 @@ interface UIMessage<METADATA = unknown, DATA_PARTS extends UIDataTypes = UIDataT
2172
2180
 
2173
2181
  Assistant messages can have text, reasoning, tool invocation, and file parts.
2174
2182
  */
2175
- parts: Array<UIMessagePart<DATA_PARTS>>;
2183
+ parts: Array<UIMessagePart<DATA_PARTS, TOOLS>>;
2176
2184
  }
2177
- type UIMessagePart<DATA_TYPES extends UIDataTypes> = TextUIPart | ReasoningUIPart | ToolInvocationUIPart | SourceUrlUIPart | SourceDocumentUIPart | FileUIPart | DataUIPart<DATA_TYPES> | StepStartUIPart;
2178
- type DataUIPart<DATA_TYPES extends UIDataTypes> = ValueOf<{
2179
- [NAME in keyof DATA_TYPES & string]: {
2180
- type: `data-${NAME}`;
2181
- id?: string;
2182
- data: DATA_TYPES[NAME];
2183
- };
2184
- }>;
2185
- type UIDataPartSchemas = Record<string, Validator<any> | StandardSchemaV1<any>>;
2186
- type InferUIDataParts<T extends UIDataPartSchemas> = {
2187
- [K in keyof T]: T[K] extends Validator<infer U> ? U : T[K] extends StandardSchemaV1<infer U> ? U : unknown;
2188
- };
2185
+ type UIMessagePart<DATA_TYPES extends UIDataTypes, TOOLS extends UITools> = TextUIPart | ReasoningUIPart | ToolUIPart<TOOLS> | SourceUrlUIPart | SourceDocumentUIPart | FileUIPart | DataUIPart<DATA_TYPES> | StepStartUIPart;
2189
2186
  /**
2190
2187
  * A text part of a message.
2191
2188
  */
@@ -2210,16 +2207,6 @@ type ReasoningUIPart = {
2210
2207
  */
2211
2208
  providerMetadata?: Record<string, any>;
2212
2209
  };
2213
- /**
2214
- * A tool invocation part of a message.
2215
- */
2216
- type ToolInvocationUIPart = {
2217
- type: 'tool-invocation';
2218
- /**
2219
- * The tool invocation.
2220
- */
2221
- toolInvocation: ToolInvocation;
2222
- };
2223
2210
  /**
2224
2211
  * A source part of a message.
2225
2212
  */
@@ -2268,10 +2255,33 @@ type FileUIPart = {
2268
2255
  type StepStartUIPart = {
2269
2256
  type: 'step-start';
2270
2257
  };
2271
- type CreateUIMessage<METADATA = unknown, DATA_TYPES extends UIDataTypes = UIDataTypes> = Omit<UIMessage<METADATA, DATA_TYPES>, 'id' | 'role'> & {
2272
- id?: UIMessage<METADATA, DATA_TYPES>['id'];
2273
- role?: UIMessage<METADATA, DATA_TYPES>['role'];
2274
- };
2258
+ type DataUIPart<DATA_TYPES extends UIDataTypes> = ValueOf<{
2259
+ [NAME in keyof DATA_TYPES & string]: {
2260
+ type: `data-${NAME}`;
2261
+ id?: string;
2262
+ data: DATA_TYPES[NAME];
2263
+ };
2264
+ }>;
2265
+ type ToolUIPart<TOOLS extends UITools = UITools> = ValueOf<{
2266
+ [NAME in keyof TOOLS & string]: {
2267
+ type: `tool-${NAME}`;
2268
+ toolCallId: string;
2269
+ } & ({
2270
+ state: 'partial-call';
2271
+ args: DeepPartial<TOOLS[NAME]['args']>;
2272
+ } | {
2273
+ state: 'call';
2274
+ args: TOOLS[NAME]['args'];
2275
+ } | {
2276
+ state: 'result';
2277
+ args: TOOLS[NAME]['args'];
2278
+ result: TOOLS[NAME]['result'];
2279
+ });
2280
+ }>;
2281
+ declare function isToolUIPart<TOOLS extends UITools>(part: UIMessagePart<UIDataTypes, TOOLS>): part is ToolUIPart<TOOLS>;
2282
+ declare function getToolName<TOOLS extends UITools>(part: ToolUIPart<TOOLS>): keyof TOOLS;
2283
+ type InferUIMessageMetadata<T extends UIMessage> = T extends UIMessage<infer METADATA> ? METADATA : unknown;
2284
+ type InferUIMessageData<T extends UIMessage> = T extends UIMessage<unknown, infer DATA_TYPES> ? DATA_TYPES : UIDataTypes;
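The block above replaces the old untyped `ToolInvocation` parts with name-keyed, fully typed tool parts. A minimal sketch of how the new helpers might be used, assuming `isToolUIPart`, `getToolName`, and the related types are exported from the package root (the export list is not part of this diff); the `weather` tool and its shapes are made up for illustration:

```ts
import {
  getToolName,
  isToolUIPart,
  type UIDataTypes,
  type UIMessage,
} from 'ai';

// Hypothetical tool map: one `weather` tool with typed args and result.
type MyTools = {
  weather: {
    args: { city: string };
    result: { temperature: number } | undefined;
  };
};

type MyMessage = UIMessage<unknown, UIDataTypes, MyTools>;

function logToolParts(message: MyMessage) {
  for (const part of message.parts) {
    if (!isToolUIPart(part)) continue; // narrows to ToolUIPart<MyTools>

    const name = getToolName(part); // 'weather', derived from the `tool-weather` type

    switch (part.state) {
      case 'partial-call':
        // args are DeepPartial while the call is still streaming in
        console.log(name, 'streaming args:', part.args);
        break;
      case 'call':
        console.log(name, 'called with city', part.args.city);
        break;
      case 'result':
        console.log(name, 'result:', part.result);
        break;
    }
  }
}
```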
2275
2285
 
2276
2286
  declare const symbol$3: unique symbol;
2277
2287
  declare class MessageConversionError extends AISDKError {
@@ -2341,300 +2351,842 @@ declare function callCompletionApi({ api, prompt, credentials, headers, body, st
2341
2351
  fetch: ReturnType<typeof getOriginalFetch> | undefined;
2342
2352
  }): Promise<string | null | undefined>;
2343
2353
 
2344
- /**
2345
- The result of an `embed` call.
2346
- It contains the embedding, the value, and additional information.
2347
- */
2348
- interface EmbedResult<VALUE> {
2354
+ type DataUIMessageStreamPart<DATA_TYPES extends UIDataTypes> = ValueOf<{
2355
+ [NAME in keyof DATA_TYPES & string]: {
2356
+ type: `data-${NAME}`;
2357
+ id?: string;
2358
+ data: DATA_TYPES[NAME];
2359
+ };
2360
+ }>;
2361
+ type UIMessageStreamPart<METADATA = unknown, DATA_TYPES extends UIDataTypes = UIDataTypes> = {
2362
+ type: 'text';
2363
+ text: string;
2364
+ } | {
2365
+ type: 'error';
2366
+ errorText: string;
2367
+ } | {
2368
+ type: 'tool-call';
2369
+ toolCallId: string;
2370
+ toolName: string;
2371
+ args: unknown;
2372
+ } | {
2373
+ type: 'tool-result';
2374
+ toolCallId: string;
2375
+ result: unknown;
2376
+ providerMetadata?: ProviderMetadata;
2377
+ } | {
2378
+ type: 'tool-call-streaming-start';
2379
+ toolCallId: string;
2380
+ toolName: string;
2381
+ } | {
2382
+ type: 'tool-call-delta';
2383
+ toolCallId: string;
2384
+ argsTextDelta: string;
2385
+ } | {
2386
+ type: 'reasoning';
2387
+ text: string;
2388
+ providerMetadata?: ProviderMetadata;
2389
+ } | {
2390
+ type: 'reasoning-part-finish';
2391
+ } | {
2392
+ type: 'source-url';
2393
+ sourceId: string;
2394
+ url: string;
2395
+ title?: string;
2396
+ providerMetadata?: ProviderMetadata;
2397
+ } | {
2398
+ type: 'source-document';
2399
+ sourceId: string;
2400
+ mediaType: string;
2401
+ title: string;
2402
+ filename?: string;
2403
+ providerMetadata?: ProviderMetadata;
2404
+ } | {
2405
+ type: 'file';
2406
+ url: string;
2407
+ mediaType: string;
2408
+ } | DataUIMessageStreamPart<DATA_TYPES> | {
2409
+ type: 'start-step';
2410
+ } | {
2411
+ type: 'finish-step';
2412
+ } | {
2413
+ type: 'start';
2414
+ messageId?: string;
2415
+ messageMetadata?: METADATA;
2416
+ } | {
2417
+ type: 'finish';
2418
+ messageMetadata?: METADATA;
2419
+ } | {
2420
+ type: 'message-metadata';
2421
+ messageMetadata: METADATA;
2422
+ };
2423
+ type InferUIMessageStreamPart<T extends UIMessage> = UIMessageStreamPart<InferUIMessageMetadata<T>, InferUIMessageData<T>>;
2424
+
2425
+ interface UIMessageStreamWriter<UI_MESSAGE extends UIMessage = UIMessage> {
2349
2426
  /**
2350
- The value that was embedded.
2351
- */
2352
- readonly value: VALUE;
2427
+ * Appends a data stream part to the stream.
2428
+ */
2429
+ write(part: InferUIMessageStreamPart<UI_MESSAGE>): void;
2353
2430
  /**
2354
- The embedding of the value.
2355
- */
2356
- readonly embedding: Embedding;
2431
+ * Merges the contents of another stream to this stream.
2432
+ */
2433
+ merge(stream: ReadableStream<InferUIMessageStreamPart<UI_MESSAGE>>): void;
2357
2434
  /**
2358
- The embedding token usage.
2359
- */
2360
- readonly usage: EmbeddingModelUsage;
2435
+ * Error handler that is used by the data stream writer.
2436
+ * This is intended for forwarding when merging streams
2437
+ * to prevent duplicated error masking.
2438
+ */
2439
+ onError: ((error: unknown) => string) | undefined;
2440
+ }
2441
+
2442
+ declare function createUIMessageStream<UI_MESSAGE extends UIMessage>({ execute, onError, // mask error messages for safety by default
2443
+ originalMessages, onFinish, generateId, }: {
2444
+ execute: (options: {
2445
+ writer: UIMessageStreamWriter<UI_MESSAGE>;
2446
+ }) => Promise<void> | void;
2447
+ onError?: (error: unknown) => string;
2361
2448
  /**
2362
- Optional response data.
2363
- */
2364
- readonly response?: {
2449
+ * The original messages. If they are provided, persistence mode is assumed,
2450
+ * and a message ID is provided for the response message.
2451
+ */
2452
+ originalMessages?: UI_MESSAGE[];
2453
+ onFinish?: (options: {
2365
2454
  /**
2366
- Response headers.
2367
- */
2368
- headers?: Record<string, string>;
2455
+ * The updated list of UI messages.
2456
+ */
2457
+ messages: UI_MESSAGE[];
2369
2458
  /**
2370
- The response body.
2371
- */
2372
- body?: unknown;
2373
- };
2374
- }
2459
+ * Indicates whether the response message is a continuation of the last original message,
2460
+ * or if a new message was created.
2461
+ */
2462
+ isContinuation: boolean;
2463
+ /**
2464
+ * The message that was sent to the client as a response
2465
+ * (including the original message if it was extended).
2466
+ */
2467
+ responseMessage: UI_MESSAGE;
2468
+ }) => void;
2469
+ generateId?: IdGenerator;
2470
+ }): ReadableStream<InferUIMessageStreamPart<UI_MESSAGE>>;
2375
2471
 
2376
- /**
2377
- Embed a value using an embedding model. The type of the value is defined by the embedding model.
2472
+ declare function createUIMessageStreamResponse({ status, statusText, headers, stream, }: ResponseInit & {
2473
+ stream: ReadableStream<UIMessageStreamPart>;
2474
+ }): Response;
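Together, `createUIMessageStream` and `createUIMessageStreamResponse` let a server assemble a UI message stream by hand. A hedged sketch of a route handler; the Next.js-style `GET` export and the `data-status` part name are illustrative:

```ts
import {
  createUIMessageStream,
  createUIMessageStreamResponse,
  type UIMessage,
} from 'ai';

export function GET(): Response {
  const stream = createUIMessageStream<UIMessage>({
    execute: async ({ writer }) => {
      writer.write({ type: 'start' });
      writer.write({ type: 'start-step' });
      // Custom data part: the `status` name becomes the `data-status` type.
      writer.write({ type: 'data-status', data: 'thinking' });
      writer.write({ type: 'text', text: 'Hello from a hand-built stream!' });
      writer.write({ type: 'finish-step' });
      writer.write({ type: 'finish' });
    },
    // Errors are masked by default; override to expose a safe message.
    onError: error => `Stream error: ${String(error)}`,
  });

  return createUIMessageStreamResponse({ stream });
}
```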
2378
2475
 
2379
- @param model - The embedding model to use.
2380
- @param value - The value that should be embedded.
2476
+ declare function pipeUIMessageStreamToResponse({ response, status, statusText, headers, stream, }: {
2477
+ response: ServerResponse;
2478
+ stream: ReadableStream<UIMessageStreamPart>;
2479
+ } & ResponseInit): void;
2381
2480
 
2382
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
2383
- @param abortSignal - An optional abort signal that can be used to cancel the call.
2384
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
2481
+ declare class JsonToSseTransformStream extends TransformStream<unknown, string> {
2482
+ constructor();
2483
+ }
2385
2484
 
2386
- @returns A result object that contains the embedding, the value, and additional information.
2387
- */
2388
- declare function embed<VALUE>({ model, value, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
2485
+ interface ChatTransport<UI_MESSAGE extends UIMessage> {
2486
+ submitMessages: (options: {
2487
+ chatId: string;
2488
+ messages: UI_MESSAGE[];
2489
+ abortSignal: AbortSignal | undefined;
2490
+ requestType: 'generate' | 'resume';
2491
+ } & ChatRequestOptions) => Promise<ReadableStream<UIMessageStreamPart>>;
2492
+ }
2493
+
2494
+ type CreateUIMessage<UI_MESSAGE extends UIMessage> = Omit<UI_MESSAGE, 'id' | 'role'> & {
2495
+ id?: UI_MESSAGE['id'];
2496
+ role?: UI_MESSAGE['role'];
2497
+ };
2498
+ type UIDataPartSchemas = Record<string, Validator<any> | StandardSchemaV1<any>>;
2499
+ type UIDataTypesToSchemas<T extends UIDataTypes> = {
2500
+ [K in keyof T]: Validator<T[K]> | StandardSchemaV1<T[K]>;
2501
+ };
2502
+ type InferUIDataParts<T extends UIDataPartSchemas> = {
2503
+ [K in keyof T]: T[K] extends Validator<infer U> ? U : T[K] extends StandardSchemaV1<infer U> ? U : unknown;
2504
+ };
2505
+ type ChatRequestOptions = {
2389
2506
  /**
2390
- The embedding model to use.
2391
- */
2392
- model: EmbeddingModel<VALUE>;
2507
+ Additional headers that should be passed to the API endpoint.
2508
+ */
2509
+ headers?: Record<string, string> | Headers;
2393
2510
  /**
2394
- The value that should be embedded.
2511
+ Additional body JSON properties that should be sent to the API endpoint.
2395
2512
  */
2396
- value: VALUE;
2513
+ body?: object;
2514
+ metadata?: unknown;
2515
+ };
2516
+ type ChatStatus = 'submitted' | 'streaming' | 'ready' | 'error';
2517
+ interface ChatState<UI_MESSAGE extends UIMessage> {
2518
+ status: ChatStatus;
2519
+ error: Error | undefined;
2520
+ messages: UI_MESSAGE[];
2521
+ pushMessage: (message: UI_MESSAGE) => void;
2522
+ popMessage: () => void;
2523
+ replaceMessage: (index: number, message: UI_MESSAGE) => void;
2524
+ snapshot: <T>(thing: T) => T;
2525
+ }
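`ChatState` is the storage abstraction that `AbstractChat` (declared further down) drives. A minimal, framework-free sketch backed by a plain array, assuming the type is exported; real integrations would back these fields with something reactive:

```ts
import type { ChatState, ChatStatus, UIMessage } from 'ai';

class InMemoryChatState<UI_MESSAGE extends UIMessage = UIMessage>
  implements ChatState<UI_MESSAGE>
{
  status: ChatStatus = 'ready';
  error: Error | undefined = undefined;
  messages: UI_MESSAGE[];

  constructor(initialMessages: UI_MESSAGE[] = []) {
    this.messages = initialMessages;
  }

  pushMessage = (message: UI_MESSAGE) => {
    this.messages = [...this.messages, message];
  };

  popMessage = () => {
    this.messages = this.messages.slice(0, -1);
  };

  replaceMessage = (index: number, message: UI_MESSAGE) => {
    this.messages = [
      ...this.messages.slice(0, index),
      message,
      ...this.messages.slice(index + 1),
    ];
  };

  // `snapshot` detaches a value from any reactive wrapper; a deep copy suffices here.
  snapshot = <T>(thing: T): T => structuredClone(thing);
}
```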
2526
+ interface ChatInit<UI_MESSAGE extends UIMessage> {
2397
2527
  /**
2398
- Maximum number of retries per embedding model call. Set to 0 to disable retries.
2399
-
2400
- @default 2
2528
+ * A unique identifier for the chat. If not provided, a random one will be
2529
+ * generated.
2401
2530
  */
2402
- maxRetries?: number;
2531
+ id?: string;
2532
+ messageMetadataSchema?: Validator<InferUIMessageMetadata<UI_MESSAGE>> | StandardSchemaV1<InferUIMessageMetadata<UI_MESSAGE>>;
2533
+ dataPartSchemas?: UIDataTypesToSchemas<InferUIMessageData<UI_MESSAGE>>;
2534
+ messages?: UI_MESSAGE[];
2403
2535
  /**
2404
- Abort signal.
2405
- */
2406
- abortSignal?: AbortSignal;
2536
+ * A way to provide a function that is going to be used for ids for messages and the chat.
2537
+ * If not provided the default AI SDK `generateId` is used.
2538
+ */
2539
+ generateId?: IdGenerator;
2540
+ transport?: ChatTransport<UI_MESSAGE>;
2541
+ maxSteps?: number;
2407
2542
  /**
2408
- Additional headers to include in the request.
2409
- Only applicable for HTTP-based providers.
2410
- */
2411
- headers?: Record<string, string>;
2543
+ * Callback function to be called when an error is encountered.
2544
+ */
2545
+ onError?: (error: Error) => void;
2412
2546
  /**
2413
- Additional provider-specific options. They are passed through
2414
- to the provider from the AI SDK and enable provider-specific
2415
- functionality that can be fully encapsulated in the provider.
2416
- */
2417
- providerOptions?: ProviderOptions;
2547
+ Optional callback function that is invoked when a tool call is received.
2548
+ Intended for automatic client-side tool execution.
2549
+
2550
+ You can optionally return a result for the tool call,
2551
+ either synchronously or asynchronously.
2552
+ */
2553
+ onToolCall?: ({ toolCall, }: {
2554
+ toolCall: ToolCall<string, unknown>;
2555
+ }) => void | Promise<unknown> | unknown;
2418
2556
  /**
2419
- * Optional telemetry configuration (experimental).
2557
+ * Optional callback function that is called when the assistant message is finished streaming.
2558
+ *
2559
+ * @param message The message that was streamed.
2420
2560
  */
2421
- experimental_telemetry?: TelemetrySettings;
2422
- }): Promise<EmbedResult<VALUE>>;
2423
-
2424
- /**
2425
- The result of a `embedMany` call.
2426
- It contains the embeddings, the values, and additional information.
2427
- */
2428
- interface EmbedManyResult<VALUE> {
2561
+ onFinish?: (options: {
2562
+ message: UI_MESSAGE;
2563
+ }) => void;
2564
+ }
2565
+ declare abstract class AbstractChat<UI_MESSAGE extends UIMessage> {
2566
+ readonly id: string;
2567
+ readonly generateId: IdGenerator;
2568
+ protected state: ChatState<UI_MESSAGE>;
2569
+ private messageMetadataSchema;
2570
+ private dataPartSchemas;
2571
+ private readonly transport;
2572
+ private maxSteps;
2573
+ private onError?;
2574
+ private onToolCall?;
2575
+ private onFinish?;
2576
+ private activeResponse;
2577
+ private jobExecutor;
2578
+ constructor({ generateId, id, transport, maxSteps, messageMetadataSchema, dataPartSchemas, state, onError, onToolCall, onFinish, }: Omit<ChatInit<UI_MESSAGE>, 'messages'> & {
2579
+ state: ChatState<UI_MESSAGE>;
2580
+ });
2429
2581
  /**
2430
- The values that were embedded.
2431
- */
2432
- readonly values: Array<VALUE>;
2582
+ * Hook status:
2583
+ *
2584
+ * - `submitted`: The message has been sent to the API and we're awaiting the start of the response stream.
2585
+ * - `streaming`: The response is actively streaming in from the API, receiving chunks of data.
2586
+ * - `ready`: The full response has been received and processed; a new user message can be submitted.
2587
+ * - `error`: An error occurred during the API request, preventing successful completion.
2588
+ */
2589
+ get status(): ChatStatus;
2590
+ protected setStatus({ status, error, }: {
2591
+ status: ChatStatus;
2592
+ error?: Error;
2593
+ }): void;
2594
+ get error(): Error | undefined;
2595
+ get messages(): UI_MESSAGE[];
2596
+ get lastMessage(): UI_MESSAGE | undefined;
2597
+ set messages(messages: UI_MESSAGE[]);
2598
+ removeAssistantResponse: () => void;
2433
2599
  /**
2434
- The embeddings. They are in the same order as the values.
2435
- */
2436
- readonly embeddings: Array<Embedding>;
2600
+ * Append a user message to the chat list. This triggers the API call to fetch
2601
+ * the assistant's response.
2602
+ */
2603
+ sendMessage: (message: (CreateUIMessage<UI_MESSAGE> & {
2604
+ text?: never;
2605
+ files?: never;
2606
+ }) | {
2607
+ text: string;
2608
+ files?: FileList | FileUIPart[];
2609
+ metadata?: InferUIMessageMetadata<UI_MESSAGE>;
2610
+ parts?: never;
2611
+ } | {
2612
+ files: FileList | FileUIPart[];
2613
+ metadata?: InferUIMessageMetadata<UI_MESSAGE>;
2614
+ parts?: never;
2615
+ }, options?: ChatRequestOptions) => Promise<void>;
2437
2616
  /**
2438
- The embedding token usage.
2439
- */
2440
- readonly usage: EmbeddingModelUsage;
2617
+ * Regenerate the last assistant message.
2618
+ */
2619
+ reload: (options?: ChatRequestOptions) => Promise<void>;
2441
2620
  /**
2442
- Optional raw response data.
2443
- */
2444
- readonly responses?: Array<{
2445
- /**
2446
- Response headers.
2447
- */
2448
- headers?: Record<string, string>;
2449
- /**
2450
- The response body.
2451
- */
2452
- body?: unknown;
2453
- } | undefined>;
2621
+ * Resume an ongoing chat generation stream. This does not resume an aborted generation.
2622
+ */
2623
+ experimental_resume: (options?: ChatRequestOptions) => Promise<void>;
2624
+ addToolResult: ({ toolCallId, result, }: {
2625
+ toolCallId: string;
2626
+ result: unknown;
2627
+ }) => Promise<void>;
2628
+ /**
2629
+ * Abort the current request immediately, keep the generated tokens if any.
2630
+ */
2631
+ stop: () => Promise<void>;
2632
+ private triggerRequest;
2454
2633
  }
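A concrete chat can wire `AbstractChat` to the `InMemoryChatState` sketched earlier and to `DefaultChatTransport` (declared below). A hedged sketch; the `/api/chat` endpoint is assumed and real apps would normally use the framework bindings instead:

```ts
import {
  AbstractChat,
  DefaultChatTransport,
  type ChatInit,
  type UIMessage,
} from 'ai';

// Plain, non-reactive chat for scripts or tests, reusing the
// InMemoryChatState sketch from above.
class SimpleChat<UI_MESSAGE extends UIMessage = UIMessage> extends AbstractChat<UI_MESSAGE> {
  constructor({ messages, ...init }: ChatInit<UI_MESSAGE> = {}) {
    super({
      ...init,
      transport:
        init.transport ??
        new DefaultChatTransport<UI_MESSAGE>({ api: '/api/chat' }),
      state: new InMemoryChatState<UI_MESSAGE>(messages ?? []),
    });
  }
}

const chat = new SimpleChat();
await chat.sendMessage({ text: 'What changed in 5.0.0-alpha.13?' });
console.log(chat.status, chat.lastMessage);
```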
2455
2634
 
2635
+ declare function convertFileListToFileUIParts(files: FileList | undefined): Promise<Array<FileUIPart>>;
2636
+
2456
2637
  /**
2457
- Embed several values using an embedding model. The type of the value is defined
2458
- by the embedding model.
2638
+ Converts an array of messages from useChat into an array of CoreMessages that can be used
2639
+ with the AI core functions (e.g. `streamText`).
2640
+ */
2641
+ declare function convertToModelMessages<TOOLS extends ToolSet = never>(messages: Array<Omit<UIMessage, 'id'>>, options?: {
2642
+ tools?: TOOLS;
2643
+ }): ModelMessage[];
2644
+ /**
2645
+ @deprecated Use `convertToModelMessages` instead.
2646
+ */
2647
+ declare const convertToCoreMessages: typeof convertToModelMessages;
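On the server, `convertToModelMessages` bridges UI messages to model messages before calling `streamText`. A typical pairing might look like the following; the `@ai-sdk/openai` provider and the route shape are illustrative:

```ts
import { convertToModelMessages, streamText, type UIMessage } from 'ai';
import { openai } from '@ai-sdk/openai'; // illustrative provider

export async function POST(req: Request): Promise<Response> {
  const { messages }: { messages: UIMessage[] } = await req.json();

  const result = streamText({
    model: openai('gpt-4o'),
    // UI messages (with their parts) become assistant/tool model messages.
    messages: convertToModelMessages(messages),
  });

  return result.toUIMessageStreamResponse();
}
```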
2459
2648
 
2460
- `embedMany` automatically splits large requests into smaller chunks if the model
2461
- has a limit on how many embeddings can be generated in a single call.
2649
+ type PrepareRequest<UI_MESSAGE extends UIMessage> = (options: {
2650
+ id: string;
2651
+ messages: UI_MESSAGE[];
2652
+ requestMetadata: unknown;
2653
+ body: Record<string, any> | undefined;
2654
+ credentials: RequestCredentials | undefined;
2655
+ headers: HeadersInit | undefined;
2656
+ }) => {
2657
+ body: object;
2658
+ headers?: HeadersInit;
2659
+ credentials?: RequestCredentials;
2660
+ };
2462
2661
 
2463
- @param model - The embedding model to use.
2464
- @param values - The values that should be embedded.
2662
+ declare class DefaultChatTransport<UI_MESSAGE extends UIMessage> implements ChatTransport<UI_MESSAGE> {
2663
+ private api;
2664
+ private credentials?;
2665
+ private headers?;
2666
+ private body?;
2667
+ private fetch?;
2668
+ private prepareRequest?;
2669
+ constructor({ api, credentials, headers, body, fetch, prepareRequest, }?: {
2670
+ api?: string;
2671
+ /**
2672
+ * The credentials mode to be used for the fetch request.
2673
+ * Possible values are: 'omit', 'same-origin', 'include'.
2674
+ * Defaults to 'same-origin'.
2675
+ */
2676
+ credentials?: RequestCredentials;
2677
+ /**
2678
+ * HTTP headers to be sent with the API request.
2679
+ */
2680
+ headers?: Record<string, string> | Headers;
2681
+ /**
2682
+ * Extra body object to be sent with the API request.
2683
+ * @example
2684
+ * Send a `sessionId` to the API along with the messages.
2685
+ * ```js
2686
+ * useChat({
2687
+ * body: {
2688
+ * sessionId: '123',
2689
+ * }
2690
+ * })
2691
+ * ```
2692
+ */
2693
+ body?: object;
2694
+ /**
2695
+ Custom fetch implementation. You can use it as a middleware to intercept requests,
2696
+ or to provide a custom fetch implementation for e.g. testing.
2697
+ */
2698
+ fetch?: FetchFunction;
2699
+ /**
2700
+ * When a function is provided, it will be used
2701
+ * to prepare the request body for the chat API. This can be useful for
2702
+ * customizing the request body based on the messages and data in the chat.
2703
+ *
2704
+ * @param id The id of the chat.
2705
+ * @param messages The current messages in the chat.
2706
+ * @param requestBody The request body object passed in the chat request.
2707
+ */
2708
+ prepareRequest?: PrepareRequest<UI_MESSAGE>;
2709
+ });
2710
+ submitMessages({ chatId, messages, abortSignal, metadata, headers, body, requestType, }: Parameters<ChatTransport<UI_MESSAGE>['submitMessages']>[0]): Promise<ReadableStream<UIMessageStreamPart>>;
2711
+ }
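A hedged sketch of configuring `DefaultChatTransport`, using `prepareRequest` to send only the last message plus the chat id (the endpoint, header, and body shape are made up; the server would reload earlier history itself):

```ts
import { DefaultChatTransport, type UIMessage } from 'ai';

const transport = new DefaultChatTransport<UIMessage>({
  api: '/api/chat',
  headers: { 'x-app-version': '1.2.3' },
  prepareRequest: ({ id, messages, body }) => ({
    body: {
      ...body,
      chatId: id,
      // Only the latest message goes over the wire.
      message: messages[messages.length - 1],
    },
  }),
});
```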
2465
2712
 
2466
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
2467
- @param abortSignal - An optional abort signal that can be used to cancel the call.
2468
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
2713
+ declare class TextStreamChatTransport<UI_MESSAGE extends UIMessage> implements ChatTransport<UI_MESSAGE> {
2714
+ private api;
2715
+ private credentials?;
2716
+ private headers?;
2717
+ private body?;
2718
+ private fetch?;
2719
+ private prepareRequest?;
2720
+ constructor({ api, credentials, headers, body, fetch, prepareRequest, }: {
2721
+ api: string;
2722
+ /**
2723
+ * The credentials mode to be used for the fetch request.
2724
+ * Possible values are: 'omit', 'same-origin', 'include'.
2725
+ * Defaults to 'same-origin'.
2726
+ */
2727
+ credentials?: RequestCredentials;
2728
+ /**
2729
+ * HTTP headers to be sent with the API request.
2730
+ */
2731
+ headers?: Record<string, string> | Headers;
2732
+ /**
2733
+ * Extra body object to be sent with the API request.
2734
+ * @example
2735
+ * Send a `sessionId` to the API along with the messages.
2736
+ * ```js
2737
+ * useChat({
2738
+ * body: {
2739
+ * sessionId: '123',
2740
+ * }
2741
+ * })
2742
+ * ```
2743
+ */
2744
+ body?: object;
2745
+ /**
2746
+ Custom fetch implementation. You can use it as a middleware to intercept requests,
2747
+ or to provide a custom fetch implementation for e.g. testing.
2748
+ */
2749
+ fetch?: FetchFunction;
2750
+ /**
2751
+ * When a function is provided, it will be used
2752
+ * to prepare the request body for the chat API. This can be useful for
2753
+ * customizing the request body based on the messages and data in the chat.
2754
+ *
2755
+ * @param id The id of the chat.
2756
+ * @param messages The current messages in the chat.
2757
+ * @param requestBody The request body object passed in the chat request.
2758
+ */
2759
+ prepareRequest?: NoInfer<PrepareRequest<UI_MESSAGE>>;
2760
+ });
2761
+ submitMessages({ chatId, messages, abortSignal, metadata, headers, body, requestType, }: Parameters<ChatTransport<UI_MESSAGE>['submitMessages']>[0]): Promise<ReadableStream<UIMessageStreamPart<never, never>>>;
2762
+ }
2469
2763
 
2470
- @returns A result object that contains the embeddings, the value, and additional information.
2471
- */
2472
- declare function embedMany<VALUE>({ model, values, maxParallelCalls, maxRetries: maxRetriesArg, abortSignal, headers, providerOptions, experimental_telemetry: telemetry, }: {
2764
+ type CompletionRequestOptions = {
2473
2765
  /**
2474
- The embedding model to use.
2766
+ An optional object of headers to be passed to the API endpoint.
2767
+ */
2768
+ headers?: Record<string, string> | Headers;
2769
+ /**
2770
+ An optional object to be passed to the API endpoint.
2475
2771
  */
2476
- model: EmbeddingModel<VALUE>;
2772
+ body?: object;
2773
+ };
2774
+ type UseCompletionOptions = {
2477
2775
  /**
2478
- The values that should be embedded.
2776
+ * The API endpoint that accepts a `{ prompt: string }` object and returns
2777
+ * a stream of tokens of the AI completion response. Defaults to `/api/completion`.
2479
2778
  */
2480
- values: Array<VALUE>;
2779
+ api?: string;
2481
2780
  /**
2482
- Maximum number of retries per embedding model call. Set to 0 to disable retries.
2483
-
2484
- @default 2
2781
+ * A unique identifier for the chat. If not provided, a random one will be
2782
+ * generated. When provided, the `useChat` hook with the same `id` will
2783
+ * have shared states across components.
2485
2784
  */
2486
- maxRetries?: number;
2785
+ id?: string;
2487
2786
  /**
2488
- Abort signal.
2489
- */
2490
- abortSignal?: AbortSignal;
2787
+ * Initial prompt input of the completion.
2788
+ */
2789
+ initialInput?: string;
2491
2790
  /**
2492
- Additional headers to include in the request.
2493
- Only applicable for HTTP-based providers.
2494
- */
2495
- headers?: Record<string, string>;
2791
+ * Initial completion result. Useful to load an existing history.
2792
+ */
2793
+ initialCompletion?: string;
2496
2794
  /**
2497
- * Optional telemetry configuration (experimental).
2795
+ * Callback function to be called when the completion is finished streaming.
2498
2796
  */
2499
- experimental_telemetry?: TelemetrySettings;
2797
+ onFinish?: (prompt: string, completion: string) => void;
2500
2798
  /**
2501
- Additional provider-specific options. They are passed through
2502
- to the provider from the AI SDK and enable provider-specific
2503
- functionality that can be fully encapsulated in the provider.
2504
- */
2505
- providerOptions?: ProviderOptions;
2799
+ * Callback function to be called when an error is encountered.
2800
+ */
2801
+ onError?: (error: Error) => void;
2506
2802
  /**
2507
- * Maximum number of concurrent requests.
2508
- *
2509
- * @default Infinity
2803
+ * The credentials mode to be used for the fetch request.
2804
+ * Possible values are: 'omit', 'same-origin', 'include'.
2805
+ * Defaults to 'same-origin'.
2510
2806
  */
2511
- maxParallelCalls?: number;
2512
- }): Promise<EmbedManyResult<VALUE>>;
2807
+ credentials?: RequestCredentials;
2808
+ /**
2809
+ * HTTP headers to be sent with the API request.
2810
+ */
2811
+ headers?: Record<string, string> | Headers;
2812
+ /**
2813
+ * Extra body object to be sent with the API request.
2814
+ * @example
2815
+ * Send a `sessionId` to the API along with the prompt.
2816
+ * ```js
2817
+ * useChat({
2818
+ * body: {
2819
+ * sessionId: '123',
2820
+ * }
2821
+ * })
2822
+ * ```
2823
+ */
2824
+ body?: object;
2825
+ /**
2826
+ Streaming protocol that is used. Defaults to `data`.
2827
+ */
2828
+ streamProtocol?: 'data' | 'text';
2829
+ /**
2830
+ Custom fetch implementation. You can use it as a middleware to intercept requests,
2831
+ or to provide a custom fetch implementation for e.g. testing.
2832
+ */
2833
+ fetch?: FetchFunction;
2834
+ };
2513
2835
 
2514
2836
  /**
2515
- A message that was generated during the generation process.
2516
- It can be either an assistant message or a tool message.
2837
+ * Calculates the cosine similarity between two vectors. This is a useful metric for
2838
+ * comparing the similarity of two vectors such as embeddings.
2839
+ *
2840
+ * @param vector1 - The first vector.
2841
+ * @param vector2 - The second vector.
2842
+ *
2843
+ * @returns The cosine similarity between vector1 and vector2.
2844
+ * @returns 0 if either vector is the zero vector.
2845
+ *
2846
+ * @throws {InvalidArgumentError} If the vectors do not have the same length.
2517
2847
  */
2518
- type ResponseMessage = AssistantModelMessage | ToolModelMessage;
2848
+ declare function cosineSimilarity(vector1: number[], vector2: number[]): number;
2519
2849
 
2520
2850
  /**
2521
- * The result of a single step in the generation process.
2851
+ * Converts a data URL of type text/* to a text string.
2522
2852
  */
2523
- type StepResult<TOOLS extends ToolSet> = {
2524
- /**
2525
- The content that was generated in the last step.
2526
- */
2527
- readonly content: Array<ContentPart<TOOLS>>;
2528
- /**
2529
- The generated text.
2530
- */
2531
- readonly text: string;
2532
- /**
2533
- The reasoning that was generated during the generation.
2534
- */
2535
- readonly reasoning: Array<ReasoningPart>;
2536
- /**
2537
- The reasoning text that was generated during the generation.
2538
- */
2539
- readonly reasoningText: string | undefined;
2540
- /**
2541
- The files that were generated during the generation.
2542
- */
2543
- readonly files: Array<GeneratedFile>;
2544
- /**
2545
- The sources that were used to generate the text.
2546
- */
2547
- readonly sources: Array<Source>;
2548
- /**
2549
- The tool calls that were made during the generation.
2550
- */
2551
- readonly toolCalls: ToolCallArray<TOOLS>;
2552
- /**
2553
- The results of the tool calls.
2554
- */
2555
- readonly toolResults: ToolResultArray<TOOLS>;
2556
- /**
2557
- The reason why the generation finished.
2558
- */
2559
- readonly finishReason: FinishReason;
2853
+ declare function getTextFromDataUrl(dataUrl: string): string;
2854
+
2855
+ /**
2856
+ * Performs a deep-equal comparison of two parsed JSON objects.
2857
+ *
2858
+ * @param {any} obj1 - The first object to compare.
2859
+ * @param {any} obj2 - The second object to compare.
2860
+ * @returns {boolean} - Returns true if the two objects are deeply equal, false otherwise.
2861
+ */
2862
+ declare function isDeepEqualData(obj1: any, obj2: any): boolean;
2863
+
2864
+ declare function parsePartialJson(jsonText: string | undefined): Promise<{
2865
+ value: JSONValue$1 | undefined;
2866
+ state: 'undefined-input' | 'successful-parse' | 'repaired-parse' | 'failed-parse';
2867
+ }>;
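`parsePartialJson` is the piece that makes streamed, still-incomplete JSON usable. A small illustration; the exact `state` and repaired `value` in the comments are expectations, not guaranteed output:

```ts
import { parsePartialJson } from 'ai';

// JSON as it might look mid-stream, cut off inside a string value.
const { value, state } = await parsePartialJson('{"city": "Ber');

console.log(state); // expected: 'repaired-parse'
console.log(value); // expected: { city: 'Ber' }
```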
2868
+
2869
+ type Job = () => Promise<void>;
2870
+
2871
+ declare class SerialJobExecutor {
2872
+ private queue;
2873
+ private isProcessing;
2874
+ private processQueue;
2875
+ run(job: Job): Promise<void>;
2876
+ }
2877
+
2878
+ /**
2879
+ * Creates a ReadableStream that emits the provided values with an optional delay between each value.
2880
+ *
2881
+ * @param options - The configuration options
2882
+ * @param options.chunks - Array of values to be emitted by the stream
2883
+ * @param options.initialDelayInMs - Optional initial delay in milliseconds before emitting the first value (default: 0). Can be set to `null` to skip the initial delay. The difference between `initialDelayInMs: null` and `initialDelayInMs: 0` is that `initialDelayInMs: null` will emit the values without any delay, while `initialDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
2884
+ * @param options.chunkDelayInMs - Optional delay in milliseconds between emitting each value (default: 0). Can be set to `null` to skip the delay. The difference between `chunkDelayInMs: null` and `chunkDelayInMs: 0` is that `chunkDelayInMs: null` will emit the values without any delay, while `chunkDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
2885
+ * @returns A ReadableStream that emits the provided values
2886
+ */
2887
+ declare function simulateReadableStream<T>({ chunks, initialDelayInMs, chunkDelayInMs, _internal, }: {
2888
+ chunks: T[];
2889
+ initialDelayInMs?: number | null;
2890
+ chunkDelayInMs?: number | null;
2891
+ _internal?: {
2892
+ delay?: (ms: number | null) => Promise<void>;
2893
+ };
2894
+ }): ReadableStream<T>;
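`simulateReadableStream` is mostly a testing helper. A small sketch that drains the simulated stream with a reader:

```ts
import { simulateReadableStream } from 'ai';

// Three chunks: the first immediately (null skips the initial delay),
// then 50 ms between the remaining chunks.
const stream = simulateReadableStream({
  chunks: ['Hello', ', ', 'world!'],
  initialDelayInMs: null,
  chunkDelayInMs: 50,
});

const reader = stream.getReader();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  process.stdout.write(value);
}
```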
2895
+
2896
+ /**
2897
+ The result of an `embed` call.
2898
+ It contains the embedding, the value, and additional information.
2899
+ */
2900
+ interface EmbedResult<VALUE> {
2560
2901
  /**
2561
- The token usage of the generated text.
2562
- */
2563
- readonly usage: LanguageModelUsage;
2902
+ The value that was embedded.
2903
+ */
2904
+ readonly value: VALUE;
2564
2905
  /**
2565
- Warnings from the model provider (e.g. unsupported settings).
2566
- */
2567
- readonly warnings: CallWarning[] | undefined;
2906
+ The embedding of the value.
2907
+ */
2908
+ readonly embedding: Embedding;
2568
2909
  /**
2569
- Additional request information.
2570
- */
2571
- readonly request: LanguageModelRequestMetadata;
2910
+ The embedding token usage.
2911
+ */
2912
+ readonly usage: EmbeddingModelUsage;
2572
2913
  /**
2573
- Additional response information.
2574
- */
2575
- readonly response: LanguageModelResponseMetadata & {
2914
+ Optional response data.
2915
+ */
2916
+ readonly response?: {
2576
2917
  /**
2577
- The response messages that were generated during the call.
2578
- Response messages can be either assistant messages or tool messages.
2579
- They contain a generated id.
2580
- */
2581
- readonly messages: Array<ResponseMessage>;
2918
+ Response headers.
2919
+ */
2920
+ headers?: Record<string, string>;
2582
2921
  /**
2583
- Response body (available only for providers that use HTTP requests).
2584
- */
2922
+ The response body.
2923
+ */
2585
2924
  body?: unknown;
2586
2925
  };
2587
- /**
2588
- Additional provider-specific metadata. They are passed through
2589
- from the provider to the AI SDK and enable provider-specific
2590
- results that can be fully encapsulated in the provider.
2591
- */
2592
- readonly providerMetadata: ProviderMetadata | undefined;
2593
- };
2926
+ }
2594
2927
 
2595
2928
  /**
2596
- The result of a `generateText` call.
2597
- It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
2929
+ Embed a value using an embedding model. The type of the value is defined by the embedding model.
2930
+
2931
+ @param model - The embedding model to use.
2932
+ @param value - The value that should be embedded.
2933
+
2934
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
2935
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
2936
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
2937
+
2938
+ @returns A result object that contains the embedding, the value, and additional information.
2598
2939
  */
2599
- interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
2600
- /**
2601
- The content that was generated in the last step.
2602
- */
2603
- readonly content: Array<ContentPart<TOOLS>>;
2940
+ declare function embed<VALUE>({ model, value, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
2604
2941
  /**
2605
- The text that was generated in the last step.
2942
+ The embedding model to use.
2606
2943
  */
2607
- readonly text: string;
2944
+ model: EmbeddingModel<VALUE>;
2608
2945
  /**
2609
- The full reasoning that the model has generated in the last step.
2946
+ The value that should be embedded.
2610
2947
  */
2611
- readonly reasoning: Array<ReasoningPart>;
2948
+ value: VALUE;
2612
2949
  /**
2613
- The reasoning text that the model has generated in the last step. Can be undefined if the model
2614
- has only generated text.
2950
+ Maximum number of retries per embedding model call. Set to 0 to disable retries.
2951
+
2952
+ @default 2
2615
2953
  */
2616
- readonly reasoningText: string | undefined;
2617
- /**
2618
- The files that were generated in the last step.
2619
- Empty array if no files were generated.
2620
- */
2621
- readonly files: Array<GeneratedFile>;
2954
+ maxRetries?: number;
2622
2955
  /**
2623
- Sources that have been used as references in the last step.
2624
- */
2625
- readonly sources: Array<Source>;
2956
+ Abort signal.
2957
+ */
2958
+ abortSignal?: AbortSignal;
2626
2959
  /**
2627
- The tool calls that were made in the last step.
2628
- */
2629
- readonly toolCalls: ToolCallArray<TOOLS>;
2960
+ Additional headers to include in the request.
2961
+ Only applicable for HTTP-based providers.
2962
+ */
2963
+ headers?: Record<string, string>;
2630
2964
  /**
2631
- The results of the tool calls from the last step.
2632
- */
2633
- readonly toolResults: ToolResultArray<TOOLS>;
2965
+ Additional provider-specific options. They are passed through
2966
+ to the provider from the AI SDK and enable provider-specific
2967
+ functionality that can be fully encapsulated in the provider.
2968
+ */
2969
+ providerOptions?: ProviderOptions;
2634
2970
  /**
2635
- The reason why the generation finished.
2971
+ * Optional telemetry configuration (experimental).
2636
2972
  */
2637
- readonly finishReason: FinishReason;
2973
+ experimental_telemetry?: TelemetrySettings;
2974
+ }): Promise<EmbedResult<VALUE>>;
2975
+
2976
+ /**
2977
+ The result of a `embedMany` call.
2978
+ It contains the embeddings, the values, and additional information.
2979
+ */
2980
+ interface EmbedManyResult<VALUE> {
2981
+ /**
2982
+ The values that were embedded.
2983
+ */
2984
+ readonly values: Array<VALUE>;
2985
+ /**
2986
+ The embeddings. They are in the same order as the values.
2987
+ */
2988
+ readonly embeddings: Array<Embedding>;
2989
+ /**
2990
+ The embedding token usage.
2991
+ */
2992
+ readonly usage: EmbeddingModelUsage;
2993
+ /**
2994
+ Optional raw response data.
2995
+ */
2996
+ readonly responses?: Array<{
2997
+ /**
2998
+ Response headers.
2999
+ */
3000
+ headers?: Record<string, string>;
3001
+ /**
3002
+ The response body.
3003
+ */
3004
+ body?: unknown;
3005
+ } | undefined>;
3006
+ }
3007
+
3008
+ /**
3009
+ Embed several values using an embedding model. The type of the value is defined
3010
+ by the embedding model.
3011
+
3012
+ `embedMany` automatically splits large requests into smaller chunks if the model
3013
+ has a limit on how many embeddings can be generated in a single call.
3014
+
3015
+ @param model - The embedding model to use.
3016
+ @param values - The values that should be embedded.
3017
+
3018
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
3019
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
3020
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
3021
+
3022
+ @returns A result object that contains the embeddings, the value, and additional information.
3023
+ */
3024
+ declare function embedMany<VALUE>({ model, values, maxParallelCalls, maxRetries: maxRetriesArg, abortSignal, headers, providerOptions, experimental_telemetry: telemetry, }: {
3025
+ /**
3026
+ The embedding model to use.
3027
+ */
3028
+ model: EmbeddingModel<VALUE>;
3029
+ /**
3030
+ The values that should be embedded.
3031
+ */
3032
+ values: Array<VALUE>;
3033
+ /**
3034
+ Maximum number of retries per embedding model call. Set to 0 to disable retries.
3035
+
3036
+ @default 2
3037
+ */
3038
+ maxRetries?: number;
3039
+ /**
3040
+ Abort signal.
3041
+ */
3042
+ abortSignal?: AbortSignal;
3043
+ /**
3044
+ Additional headers to include in the request.
3045
+ Only applicable for HTTP-based providers.
3046
+ */
3047
+ headers?: Record<string, string>;
3048
+ /**
3049
+ * Optional telemetry configuration (experimental).
3050
+ */
3051
+ experimental_telemetry?: TelemetrySettings;
3052
+ /**
3053
+ Additional provider-specific options. They are passed through
3054
+ to the provider from the AI SDK and enable provider-specific
3055
+ functionality that can be fully encapsulated in the provider.
3056
+ */
3057
+ providerOptions?: ProviderOptions;
3058
+ /**
3059
+ * Maximum number of concurrent requests.
3060
+ *
3061
+ * @default Infinity
3062
+ */
3063
+ maxParallelCalls?: number;
3064
+ }): Promise<EmbedManyResult<VALUE>>;
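A hedged end-to-end sketch that combines `embedMany`, `embed`, and the `cosineSimilarity` helper declared earlier in this diff; the `@ai-sdk/openai` embedding model is illustrative:

```ts
import { cosineSimilarity, embed, embedMany } from 'ai';
import { openai } from '@ai-sdk/openai'; // illustrative provider

const documents = [
  'The quick brown fox jumps over the lazy dog.',
  'A fast auburn fox leaps across a sleepy canine.',
  'Quarterly revenue grew by twelve percent.',
];

// One call embeds all documents; chunking and parallelism are handled internally.
const { embeddings } = await embedMany({
  model: openai.embedding('text-embedding-3-small'),
  values: documents,
  maxParallelCalls: 2,
});

const { embedding: queryEmbedding } = await embed({
  model: openai.embedding('text-embedding-3-small'),
  value: 'a fox jumping over a dog',
});

// Rank documents by cosine similarity to the query.
const ranked = documents
  .map((text, i) => ({
    text,
    score: cosineSimilarity(queryEmbedding, embeddings[i]),
  }))
  .sort((a, b) => b.score - a.score);

console.log(ranked[0].text);
```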
3065
+
3066
+ /**
3067
+ A message that was generated during the generation process.
3068
+ It can be either an assistant message or a tool message.
3069
+ */
3070
+ type ResponseMessage = AssistantModelMessage | ToolModelMessage;
3071
+
3072
+ /**
3073
+ * The result of a single step in the generation process.
3074
+ */
3075
+ type StepResult<TOOLS extends ToolSet> = {
3076
+ /**
3077
+ The content that was generated in the last step.
3078
+ */
3079
+ readonly content: Array<ContentPart<TOOLS>>;
3080
+ /**
3081
+ The generated text.
3082
+ */
3083
+ readonly text: string;
3084
+ /**
3085
+ The reasoning that was generated during the generation.
3086
+ */
3087
+ readonly reasoning: Array<ReasoningPart>;
3088
+ /**
3089
+ The reasoning text that was generated during the generation.
3090
+ */
3091
+ readonly reasoningText: string | undefined;
3092
+ /**
3093
+ The files that were generated during the generation.
3094
+ */
3095
+ readonly files: Array<GeneratedFile>;
3096
+ /**
3097
+ The sources that were used to generate the text.
3098
+ */
3099
+ readonly sources: Array<Source>;
3100
+ /**
3101
+ The tool calls that were made during the generation.
3102
+ */
3103
+ readonly toolCalls: ToolCallArray<TOOLS>;
3104
+ /**
3105
+ The results of the tool calls.
3106
+ */
3107
+ readonly toolResults: ToolResultArray<TOOLS>;
3108
+ /**
3109
+ The reason why the generation finished.
3110
+ */
3111
+ readonly finishReason: FinishReason;
3112
+ /**
3113
+ The token usage of the generated text.
3114
+ */
3115
+ readonly usage: LanguageModelUsage;
3116
+ /**
3117
+ Warnings from the model provider (e.g. unsupported settings).
3118
+ */
3119
+ readonly warnings: CallWarning[] | undefined;
3120
+ /**
3121
+ Additional request information.
3122
+ */
3123
+ readonly request: LanguageModelRequestMetadata;
3124
+ /**
3125
+ Additional response information.
3126
+ */
3127
+ readonly response: LanguageModelResponseMetadata & {
3128
+ /**
3129
+ The response messages that were generated during the call.
3130
+ Response messages can be either assistant messages or tool messages.
3131
+ They contain a generated id.
3132
+ */
3133
+ readonly messages: Array<ResponseMessage>;
3134
+ /**
3135
+ Response body (available only for providers that use HTTP requests).
3136
+ */
3137
+ body?: unknown;
3138
+ };
3139
+ /**
3140
+ Additional provider-specific metadata. They are passed through
3141
+ from the provider to the AI SDK and enable provider-specific
3142
+ results that can be fully encapsulated in the provider.
3143
+ */
3144
+ readonly providerMetadata: ProviderMetadata | undefined;
3145
+ };
3146
+
3147
+ /**
3148
+ The result of a `generateText` call.
3149
+ It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
3150
+ */
3151
+ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
3152
+ /**
3153
+ The content that was generated in the last step.
3154
+ */
3155
+ readonly content: Array<ContentPart<TOOLS>>;
3156
+ /**
3157
+ The text that was generated in the last step.
3158
+ */
3159
+ readonly text: string;
3160
+ /**
3161
+ The full reasoning that the model has generated in the last step.
3162
+ */
3163
+ readonly reasoning: Array<ReasoningPart>;
3164
+ /**
3165
+ The reasoning text that the model has generated in the last step. Can be undefined if the model
3166
+ has only generated text.
3167
+ */
3168
+ readonly reasoningText: string | undefined;
3169
+ /**
3170
+ The files that were generated in the last step.
3171
+ Empty array if no files were generated.
3172
+ */
3173
+ readonly files: Array<GeneratedFile>;
3174
+ /**
3175
+ Sources that have been used as references in the last step.
3176
+ */
3177
+ readonly sources: Array<Source>;
3178
+ /**
3179
+ The tool calls that were made in the last step.
3180
+ */
3181
+ readonly toolCalls: ToolCallArray<TOOLS>;
3182
+ /**
3183
+ The results of the tool calls from the last step.
3184
+ */
3185
+ readonly toolResults: ToolResultArray<TOOLS>;
3186
+ /**
3187
+ The reason why the generation finished.
3188
+ */
3189
+ readonly finishReason: FinishReason;
2638
3190
  /**
2639
3191
  The token usage of the last step.
2640
3192
  */
@@ -2688,23 +3240,6 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
2688
3240
  readonly experimental_output: OUTPUT;
2689
3241
  }
2690
3242
 
2691
- /**
2692
- Create a type from an object with all keys and nested keys set to optional.
2693
- The helper supports normal objects and Zod schemas (which are resolved automatically).
2694
- It always recurses into arrays.
2695
-
2696
- Adopted from [type-fest](https://github.com/sindresorhus/type-fest/tree/main) PartialDeep.
2697
- */
2698
- type DeepPartial<T> = T extends z3.ZodTypeAny ? DeepPartialInternal<z3.infer<T>> : T extends z4.$ZodType ? DeepPartialInternal<z4.infer<T>> : DeepPartialInternal<T>;
2699
- type DeepPartialInternal<T> = T extends null | undefined | string | number | boolean | symbol | bigint | void | Date | RegExp | ((...arguments_: any[]) => unknown) | (new (...arguments_: any[]) => unknown) ? T : T extends Map<infer KeyType, infer ValueType> ? PartialMap<KeyType, ValueType> : T extends Set<infer ItemType> ? PartialSet<ItemType> : T extends ReadonlyMap<infer KeyType, infer ValueType> ? PartialReadonlyMap<KeyType, ValueType> : T extends ReadonlySet<infer ItemType> ? PartialReadonlySet<ItemType> : T extends object ? T extends ReadonlyArray<infer ItemType> ? ItemType[] extends T ? readonly ItemType[] extends T ? ReadonlyArray<DeepPartialInternal<ItemType | undefined>> : Array<DeepPartialInternal<ItemType | undefined>> : PartialObject<T> : PartialObject<T> : unknown;
2700
- type PartialMap<KeyType, ValueType> = {} & Map<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
2701
- type PartialSet<T> = {} & Set<DeepPartialInternal<T>>;
2702
- type PartialReadonlyMap<KeyType, ValueType> = {} & ReadonlyMap<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
2703
- type PartialReadonlySet<T> = {} & ReadonlySet<DeepPartialInternal<T>>;
2704
- type PartialObject<ObjectType extends object> = {
2705
- [KeyType in keyof ObjectType]?: DeepPartialInternal<ObjectType[KeyType]>;
2706
- };
2707
-
2708
3243
  interface Output<OUTPUT, PARTIAL> {
2709
3244
  readonly type: 'object' | 'text';
2710
3245
  responseFormat: LanguageModelV2CallOptions['responseFormat'];
@@ -2888,23 +3423,17 @@ A function that attempts to repair a tool call that failed to parse.
2888
3423
 
2889
3424
  type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
2890
3425
 
2891
- type UIMessageStreamOptions = {
2892
- /**
2893
- * Message ID that is sent to the client if a new message is created.
2894
- * This is intended to be used for the UI message,
2895
- * if the last original message is not an assistant message
2896
- * (in which case that message ID is used).
2897
- */
2898
- newMessageId?: string;
3426
+ type UIMessageStreamOptions<UI_MESSAGE extends UIMessage> = {
2899
3427
  /**
2900
- * The original messages.
3428
+ * The original messages. If they are provided, persistence mode is assumed,
3429
+ * and a message ID is provided for the response message.
2901
3430
  */
2902
- originalMessages?: UIMessage[];
3431
+ originalMessages?: UI_MESSAGE[];
2903
3432
  onFinish?: (options: {
2904
3433
  /**
2905
3434
  * The updated list of UI messages.
2906
3435
  */
2907
- messages: UIMessage[];
3436
+ messages: UI_MESSAGE[];
2908
3437
  /**
2909
3438
  * Indicates whether the response message is a continuation of the last original message,
2910
3439
  * or if a new message was created.
@@ -2914,7 +3443,7 @@ type UIMessageStreamOptions = {
2914
3443
  * The message that was sent to the client as a response
2915
3444
  * (including the original message if it was extended).
2916
3445
  */
2917
- responseMessage: UIMessage;
3446
+ responseMessage: UI_MESSAGE;
2918
3447
  }) => void;
2919
3448
  /**
2920
3449
  * Extracts message metadata that will be sent to the client.
@@ -2922,13 +3451,11 @@ type UIMessageStreamOptions = {
2922
3451
  * Called on `start` and `finish` events.
2923
3452
  */
2924
3453
  messageMetadata?: (options: {
2925
- part: TextStreamPart<ToolSet> & {
2926
- type: 'start' | 'finish' | 'start-step' | 'finish-step';
2927
- };
2928
- }) => unknown;
3454
+ part: TextStreamPart<ToolSet>;
3455
+ }) => InferUIMessageMetadata<UI_MESSAGE> | undefined;
2929
3456
  /**
2930
3457
  * Send reasoning parts to the client.
2931
- * Default to false.
3458
+ * Defaults to true.
2932
3459
  */
2933
3460
  sendReasoning?: boolean;
2934
3461
  /**
@@ -3108,7 +3635,7 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
3108
3635
 
3109
3636
  @return A UI message stream.
3110
3637
  */
3111
- toUIMessageStream(options?: UIMessageStreamOptions): ReadableStream<UIMessageStreamPart>;
3638
+ toUIMessageStream<UI_MESSAGE extends UIMessage>(options?: UIMessageStreamOptions<UI_MESSAGE>): ReadableStream<InferUIMessageStreamPart<UI_MESSAGE>>;
3112
3639
  /**
3113
3640
  Writes UI message stream output to a Node.js response-like object.
3114
3641
  @param response A Node.js response-like object (ServerResponse).
@@ -3119,7 +3646,7 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
3119
3646
  @param options.sendUsage Whether to send the usage information to the client. Defaults to true.
3120
3647
  @param options.sendReasoning Whether to send the reasoning information to the client. Defaults to false.
3121
3648
  */
3122
- pipeUIMessageStreamToResponse(response: ServerResponse, options?: ResponseInit & UIMessageStreamOptions): void;
3649
+ pipeUIMessageStreamToResponse<UI_MESSAGE extends UIMessage>(response: ServerResponse, options?: ResponseInit & UIMessageStreamOptions<UI_MESSAGE>): void;
3123
3650
  /**
3124
3651
  Writes text delta output to a Node.js response-like object.
3125
3652
  It sets a `Content-Type` header to `text/plain; charset=utf-8` and
@@ -3139,7 +3666,7 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
3139
3666
  @param options.sendReasoning Whether to send the reasoning information to the client. Defaults to false.
3140
3667
  @return A response object.
3141
3668
  */
3142
- toUIMessageStreamResponse(options?: ResponseInit & UIMessageStreamOptions): Response;
3669
+ toUIMessageStreamResponse<UI_MESSAGE extends UIMessage>(options?: ResponseInit & UIMessageStreamOptions<UI_MESSAGE>): Response;
3143
3670
  /**
3144
3671
  Creates a simple text stream response.
3145
3672
  Each text delta is encoded as UTF-8 and sent as a separate chunk.
@@ -3178,6 +3705,9 @@ type TextStreamPart<TOOLS extends ToolSet> = ContentPart<TOOLS> | {
3178
3705
  } | {
3179
3706
  type: 'error';
3180
3707
  error: unknown;
3708
+ } | {
3709
+ type: 'raw';
3710
+ rawValue: unknown;
3181
3711
  };
3182
3712
 
3183
3713
  /**
@@ -3240,7 +3770,7 @@ Callback that is set using the `onChunk` option.
3240
3770
  */
3241
3771
  type StreamTextOnChunkCallback<TOOLS extends ToolSet> = (event: {
3242
3772
  chunk: Extract<TextStreamPart<TOOLS>, {
3243
- type: 'text' | 'reasoning' | 'source' | 'tool-call' | 'tool-call-streaming-start' | 'tool-call-delta' | 'tool-result';
3773
+ type: 'text' | 'reasoning' | 'source' | 'tool-call' | 'tool-call-streaming-start' | 'tool-call-delta' | 'tool-result' | 'raw';
3244
3774
  }>;
3245
3775
  }) => Promise<void> | void;
3246
3776
  /**
@@ -3306,7 +3836,7 @@ If set and supported by the model, calls will generate deterministic results.
3306
3836
  @return
3307
3837
  A result object for accessing different stream types and additional information.
3308
3838
  */
3309
- declare function streamText<TOOLS extends ToolSet, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, stopWhen, experimental_output: output, experimental_telemetry: telemetry, prepareStep, providerOptions, experimental_toolCallStreaming, toolCallStreaming, experimental_activeTools, activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, onChunk, onError, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
3839
+ declare function streamText<TOOLS extends ToolSet, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, stopWhen, experimental_output: output, experimental_telemetry: telemetry, prepareStep, providerOptions, experimental_activeTools, activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, includeRawChunks, onChunk, onError, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
3310
3840
  /**
3311
3841
  The language model to use.
3312
3842
  */
@@ -3366,20 +3896,19 @@ A function that attempts to repair a tool call that failed to parse.
3366
3896
  */
3367
3897
  experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
3368
3898
  /**
3369
- Enable streaming of tool call deltas as they are generated. Disabled by default.
3370
- */
3371
- toolCallStreaming?: boolean;
3372
- /**
3373
- @deprecated Use `toolCallStreaming` instead.
3374
- */
3375
- experimental_toolCallStreaming?: boolean;
3376
- /**
3377
3899
  Optional stream transformations.
3378
3900
  They are applied in the order they are provided.
3379
3901
  The stream transformations must maintain the stream structure for streamText to work correctly.
3380
3902
  */
3381
3903
  experimental_transform?: StreamTextTransform<TOOLS> | Array<StreamTextTransform<TOOLS>>;
3382
3904
  /**
3905
+ Whether to include raw chunks from the provider in the stream.
3906
+ When enabled, you will receive raw chunks with type 'raw' that contain the unprocessed data from the provider.
3907
+ This allows access to cutting-edge provider features not yet wrapped by the AI SDK.
3908
+ Defaults to false.
3909
+ */
3910
+ includeRawChunks?: boolean;
3911
+ /**
3383
3912
  Callback that is called for each chunk of the stream.
3384
3913
  The stream processing will pause until the callback promise is resolved.
3385
3914
  */
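
The new `includeRawChunks` flag pairs with the `'raw'` part added to `TextStreamPart` and to the `onChunk` chunk union above. A minimal sketch of opting in and observing unprocessed provider chunks (the `model` placeholder is an assumption; the shape of `rawValue` is provider-specific and typed `unknown`):

```ts
import { streamText, type LanguageModel } from 'ai';

declare const model: LanguageModel; // assumed: any provider model instance

const result = streamText({
  model,
  prompt: 'Hello!',
  // Opt in to unprocessed provider chunks (off by default).
  includeRawChunks: true,
  onChunk({ chunk }) {
    if (chunk.type === 'raw') {
      // rawValue is whatever the provider sent, before SDK normalization.
      console.log('raw provider chunk:', chunk.rawValue);
    }
  },
});

// Raw chunks also appear in the full stream alongside text, tool calls, etc.
for await (const part of result.fullStream) {
  if (part.type === 'raw') {
    // Inspect provider-specific fields here.
  }
}
```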
@@ -3706,67 +4235,7 @@ functionality that can be fully encapsulated in the provider.
3706
4235
  }): Promise<GenerateObjectResult<RESULT>>;
3707
4236
 
3708
4237
  /**
3709
- * Calculates the cosine similarity between two vectors. This is a useful metric for
3710
- * comparing the similarity of two vectors such as embeddings.
3711
- *
3712
- * @param vector1 - The first vector.
3713
- * @param vector2 - The second vector.
3714
- *
3715
- * @returns The cosine similarity between vector1 and vector2.
3716
- * @returns 0 if either vector is the zero vector.
3717
- *
3718
- * @throws {InvalidArgumentError} If the vectors do not have the same length.
3719
- */
3720
- declare function cosineSimilarity(vector1: number[], vector2: number[]): number;
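
`cosineSimilarity` is moved within the bundle rather than removed (it remains in the export list below). A usage sketch alongside `embedMany`, assuming any embedding model instance:

```ts
import { cosineSimilarity, embedMany, type EmbeddingModel } from 'ai';

declare const embeddingModel: EmbeddingModel<string>; // assumed provider embedding model

const { embeddings } = await embedMany({
  model: embeddingModel,
  values: ['sunny day at the beach', 'rainy afternoon in the city'],
});

// 1 = same direction, 0 = orthogonal (also returned if either vector is all zeros).
const similarity = cosineSimilarity(embeddings[0], embeddings[1]);
```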
3721
-
3722
- /**
3723
- * Converts a data URL of type text/* to a text string.
3724
- */
3725
- declare function getTextFromDataUrl(dataUrl: string): string;
3726
-
3727
- /**
3728
- * Performs a deep-equal comparison of two parsed JSON objects.
3729
- *
3730
- * @param {any} obj1 - The first object to compare.
3731
- * @param {any} obj2 - The second object to compare.
3732
- * @returns {boolean} - Returns true if the two objects are deeply equal, false otherwise.
3733
- */
3734
- declare function isDeepEqualData(obj1: any, obj2: any): boolean;
3735
-
3736
- declare function parsePartialJson(jsonText: string | undefined): Promise<{
3737
- value: JSONValue$1 | undefined;
3738
- state: 'undefined-input' | 'successful-parse' | 'repaired-parse' | 'failed-parse';
3739
- }>;
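
`getTextFromDataUrl`, `isDeepEqualData`, and `parsePartialJson` are likewise relocations. A small sketch of `parsePartialJson` repairing a truncated JSON chunk and `isDeepEqualData` checking whether the parsed value actually changed:

```ts
import { isDeepEqualData, parsePartialJson } from 'ai';

// A streamed object that was cut off mid-value.
const { value, state } = await parsePartialJson('{"city": "Ber');

// state is 'undefined-input' | 'successful-parse' | 'repaired-parse' | 'failed-parse'
if (state === 'successful-parse' || state === 'repaired-parse') {
  const previous = { city: undefined };
  if (!isDeepEqualData(previous, value)) {
    console.log('partial object updated:', value);
  }
}
```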
3740
-
3741
- type Job = () => Promise<void>;
3742
-
3743
- declare class SerialJobExecutor {
3744
- private queue;
3745
- private isProcessing;
3746
- private processQueue;
3747
- run(job: Job): Promise<void>;
3748
- }
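
`SerialJobExecutor` runs async jobs strictly one at a time. A sketch of serializing writes that must not interleave (the persistence function is illustrative):

```ts
import { SerialJobExecutor } from 'ai';

declare function saveChatToDatabase(id: string): Promise<void>; // illustrative app function

const executor = new SerialJobExecutor();

// Both jobs are accepted immediately, but the second only starts
// after the first one's promise settles.
void executor.run(async () => {
  await saveChatToDatabase('chat-1');
});
void executor.run(async () => {
  await saveChatToDatabase('chat-1');
});
```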
3749
-
3750
- /**
3751
- * Creates a ReadableStream that emits the provided values with an optional delay between each value.
3752
- *
3753
- * @param options - The configuration options
3754
- * @param options.chunks - Array of values to be emitted by the stream
3755
- * @param options.initialDelayInMs - Optional initial delay in milliseconds before emitting the first value (default: 0). Can be set to `null` to skip the initial delay. The difference between `initialDelayInMs: null` and `initialDelayInMs: 0` is that `initialDelayInMs: null` will emit the values without any delay, while `initialDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
3756
- * @param options.chunkDelayInMs - Optional delay in milliseconds between emitting each value (default: 0). Can be set to `null` to skip the delay. The difference between `chunkDelayInMs: null` and `chunkDelayInMs: 0` is that `chunkDelayInMs: null` will emit the values without any delay, while `chunkDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
3757
- * @returns A ReadableStream that emits the provided values
3758
- */
3759
- declare function simulateReadableStream<T>({ chunks, initialDelayInMs, chunkDelayInMs, _internal, }: {
3760
- chunks: T[];
3761
- initialDelayInMs?: number | null;
3762
- chunkDelayInMs?: number | null;
3763
- _internal?: {
3764
- delay?: (ms: number | null) => Promise<void>;
3765
- };
3766
- }): ReadableStream<T>;
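
`simulateReadableStream` is mainly useful in tests and mock providers. A sketch that emits three chunks with delays (the iteration helper is included only because `ReadableStream` is not async-iterable in every runtime):

```ts
import { simulateReadableStream } from 'ai';

const stream = simulateReadableStream({
  chunks: ['Hello', ' ', 'world'],
  initialDelayInMs: 100, // wait before the first chunk (null skips delays entirely)
  chunkDelayInMs: 50,    // wait between subsequent chunks
});

for await (const chunk of toAsyncIterable(stream)) {
  process.stdout.write(chunk);
}

async function* toAsyncIterable<T>(readable: ReadableStream<T>): AsyncIterable<T> {
  const reader = readable.getReader();
  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) return;
      yield value;
    }
  } finally {
    reader.releaseLock();
  }
}
```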
3767
-
3768
- /**
3769
- The result of a `streamObject` call that contains the partial object stream and additional information.
4238
+ The result of a `streamObject` call that contains the partial object stream and additional information.
3770
4239
  */
3771
4240
  interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
3772
4241
  /**
@@ -4240,603 +4709,124 @@ interface ProviderRegistryProvider<PROVIDERS extends Record<string, ProviderV2>
4240
4709
  languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['languageModel']>>[0]>}` : never): LanguageModelV2;
4241
4710
  languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): LanguageModelV2;
4242
4711
  textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['textEmbeddingModel']>>[0]>}` : never): EmbeddingModelV2<string>;
4243
- textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): EmbeddingModelV2<string>;
4244
- imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['imageModel']>>[0]>}` : never): ImageModelV2;
4245
- imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): ImageModelV2;
4246
- }
4247
- /**
4248
- * Creates a registry for the given providers.
4249
- */
4250
- declare function createProviderRegistry<PROVIDERS extends Record<string, ProviderV2>, SEPARATOR extends string = ':'>(providers: PROVIDERS, { separator, }?: {
4251
- separator?: SEPARATOR;
4252
- }): ProviderRegistryProvider<PROVIDERS, SEPARATOR>;
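
`createProviderRegistry` (re-added further down in this diff) resolves `provider:model` ids to concrete models. A sketch assuming the `@ai-sdk/openai` and `@ai-sdk/anthropic` provider packages are installed; the model ids are illustrative:

```ts
import { createProviderRegistry } from 'ai';
// Assumed provider packages; any ProviderV2 implementations work here.
import { anthropic } from '@ai-sdk/anthropic';
import { openai } from '@ai-sdk/openai';

const registry = createProviderRegistry(
  { anthropic, openai },
  { separator: ':' }, // ':' is the default separator, shown for clarity
);

// Ids follow the `${providerKey}${separator}${modelId}` template-literal types above.
const chatModel = registry.languageModel('openai:gpt-4o');
const embedder = registry.textEmbeddingModel('openai:text-embedding-3-small');
```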
4253
- /**
4254
- * @deprecated Use `createProviderRegistry` instead.
4255
- */
4256
- declare const experimental_createProviderRegistry: typeof createProviderRegistry;
4257
-
4258
- /**
4259
- The result of a `transcribe` call.
4260
- It contains the transcript and additional information.
4261
- */
4262
- interface TranscriptionResult {
4263
- /**
4264
- * The complete transcribed text from the audio.
4265
- */
4266
- readonly text: string;
4267
- /**
4268
- * Array of transcript segments with timing information.
4269
- * Each segment represents a portion of the transcribed text with start and end times.
4270
- */
4271
- readonly segments: Array<{
4272
- /**
4273
- * The text content of this segment.
4274
- */
4275
- readonly text: string;
4276
- /**
4277
- * The start time of this segment in seconds.
4278
- */
4279
- readonly startSecond: number;
4280
- /**
4281
- * The end time of this segment in seconds.
4282
- */
4283
- readonly endSecond: number;
4284
- }>;
4285
- /**
4286
- * The detected language of the audio content, as an ISO-639-1 code (e.g., 'en' for English).
4287
- * May be undefined if the language couldn't be detected.
4288
- */
4289
- readonly language: string | undefined;
4290
- /**
4291
- * The total duration of the audio file in seconds.
4292
- * May be undefined if the duration couldn't be determined.
4293
- */
4294
- readonly durationInSeconds: number | undefined;
4295
- /**
4296
- Warnings for the call, e.g. unsupported settings.
4297
- */
4298
- readonly warnings: Array<TranscriptionWarning>;
4299
- /**
4300
- Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
4301
- */
4302
- readonly responses: Array<TranscriptionModelResponseMetadata>;
4303
- /**
4304
- Provider metadata from the provider.
4305
- */
4306
- readonly providerMetadata: Record<string, Record<string, JSONValue$1>>;
4307
- }
4308
-
4309
- /**
4310
- Generates transcripts using a transcription model.
4311
-
4312
- @param model - The transcription model to use.
4313
- @param audio - The audio data to transcribe as DataContent (string | Uint8Array | ArrayBuffer | Buffer) or a URL.
4314
- @param providerOptions - Additional provider-specific options that are passed through to the provider
4315
- as body parameters.
4316
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
4317
- @param abortSignal - An optional abort signal that can be used to cancel the call.
4318
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
4319
-
4320
- @returns A result object that contains the generated transcript.
4321
- */
4322
- declare function transcribe({ model, audio, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
4323
- /**
4324
- The transcription model to use.
4325
- */
4326
- model: TranscriptionModelV1;
4327
- /**
4328
- The audio data to transcribe.
4329
- */
4330
- audio: DataContent | URL;
4331
- /**
4332
- Additional provider-specific options that are passed through to the provider
4333
- as body parameters.
4334
-
4335
- The outer record is keyed by the provider name, and the inner
4336
- record is keyed by the provider-specific metadata key.
4337
- ```ts
4338
- {
4339
- "openai": {
4340
- "temperature": 0
4341
- }
4342
- }
4343
- ```
4344
- */
4345
- providerOptions?: ProviderOptions;
4346
- /**
4347
- Maximum number of retries per transcript model call. Set to 0 to disable retries.
4348
-
4349
- @default 2
4350
- */
4351
- maxRetries?: number;
4352
- /**
4353
- Abort signal.
4354
- */
4355
- abortSignal?: AbortSignal;
4356
- /**
4357
- Additional headers to include in the request.
4358
- Only applicable for HTTP-based providers.
4359
- */
4360
- headers?: Record<string, string>;
4361
- }): Promise<TranscriptionResult>;
4362
-
4363
- type DataUIMessageStreamPart = {
4364
- type: `data-${string}`;
4365
- id?: string;
4366
- data: unknown;
4367
- };
4368
- type UIMessageStreamPart = {
4369
- type: 'text';
4370
- text: string;
4371
- } | {
4372
- type: 'error';
4373
- errorText: string;
4374
- } | {
4375
- type: 'tool-call';
4376
- toolCallId: string;
4377
- toolName: string;
4378
- args: unknown;
4379
- } | {
4380
- type: 'tool-result';
4381
- toolCallId: string;
4382
- result: unknown;
4383
- providerMetadata?: ProviderMetadata;
4384
- } | {
4385
- type: 'tool-call-streaming-start';
4386
- toolCallId: string;
4387
- toolName: string;
4388
- } | {
4389
- type: 'tool-call-delta';
4390
- toolCallId: string;
4391
- argsTextDelta: string;
4392
- } | {
4393
- type: 'reasoning';
4394
- text: string;
4395
- providerMetadata?: ProviderMetadata;
4396
- } | {
4397
- type: 'source-url';
4398
- sourceId: string;
4399
- url: string;
4400
- title?: string;
4401
- providerMetadata?: ProviderMetadata;
4402
- } | {
4403
- type: 'source-document';
4404
- sourceId: string;
4405
- mediaType: string;
4406
- title: string;
4407
- filename?: string;
4408
- providerMetadata?: ProviderMetadata;
4409
- } | {
4410
- type: 'file';
4411
- url: string;
4412
- mediaType: string;
4413
- } | DataUIMessageStreamPart | {
4414
- type: 'metadata';
4415
- metadata: unknown;
4416
- } | {
4417
- type: 'start-step';
4418
- metadata?: unknown;
4419
- } | {
4420
- type: 'finish-step';
4421
- metadata?: unknown;
4422
- } | {
4423
- type: 'start';
4424
- messageId?: string;
4425
- metadata?: unknown;
4426
- } | {
4427
- type: 'finish';
4428
- metadata?: unknown;
4429
- } | {
4430
- type: 'reasoning-part-finish';
4431
- };
4432
-
4433
- interface UIMessageStreamWriter {
4434
- /**
4435
- * Appends a data stream part to the stream.
4436
- */
4437
- write(part: UIMessageStreamPart): void;
4438
- /**
4439
- * Merges the contents of another stream to this stream.
4440
- */
4441
- merge(stream: ReadableStream<UIMessageStreamPart>): void;
4442
- /**
4443
- * Error handler that is used by the data stream writer.
4444
- * This is intended for forwarding when merging streams
4445
- * to prevent duplicated error masking.
4446
- */
4447
- onError: ((error: unknown) => string) | undefined;
4448
- }
4449
-
4450
- declare function createUIMessageStream({ execute, onError, // mask error messages for safety by default
4451
- originalMessages, onFinish, }: {
4452
- execute: (options: {
4453
- writer: UIMessageStreamWriter;
4454
- }) => Promise<void> | void;
4455
- onError?: (error: unknown) => string;
4456
- /**
4457
- * The original messages.
4458
- */
4459
- originalMessages?: UIMessage[];
4460
- onFinish?: (options: {
4461
- /**
4462
- * The updated list of UI messages.
4463
- */
4464
- messages: UIMessage[];
4465
- /**
4466
- * Indicates whether the response message is a continuation of the last original message,
4467
- * or if a new message was created.
4468
- */
4469
- isContinuation: boolean;
4470
- /**
4471
- * The message that was sent to the client as a response
4472
- * (including the original message if it was extended).
4473
- */
4474
- responseMessage: UIMessage;
4475
- }) => void;
4476
- }): ReadableStream<UIMessageStreamPart>;
4477
-
4478
- declare function createUIMessageStreamResponse({ status, statusText, headers, stream, }: ResponseInit & {
4479
- stream: ReadableStream<UIMessageStreamPart>;
4480
- }): Response;
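
`createUIMessageStream`, `UIMessageStreamWriter`, and `createUIMessageStreamResponse` (moved here, still exported) compose custom data parts with a model stream. A sketch of a handler that writes an application `data-*` part and merges a `streamText` UI message stream (the route shape, part name, and `model` placeholder are assumptions):

```ts
import {
  createUIMessageStream,
  createUIMessageStreamResponse,
  streamText,
  type LanguageModel,
} from 'ai';

declare const model: LanguageModel; // assumed model instance

export function POST(_req: Request): Response {
  const stream = createUIMessageStream({
    execute: async ({ writer }) => {
      // Custom data part; the `data-` prefix is required by DataUIMessageStreamPart.
      writer.write({ type: 'data-status', id: 'status-1', data: { stage: 'thinking' } });

      const result = streamText({ model, prompt: 'Write a haiku.' });

      // Forward the model output into the same UI message stream.
      writer.merge(result.toUIMessageStream());
    },
    onError: () => 'Something went wrong.', // mask error details for the client
  });

  return createUIMessageStreamResponse({ stream });
}
```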
4481
-
4482
- declare function pipeUIMessageStreamToResponse({ response, status, statusText, headers, stream, }: {
4483
- response: ServerResponse;
4484
- stream: ReadableStream<UIMessageStreamPart>;
4485
- } & ResponseInit): void;
4486
-
4487
- declare class JsonToSseTransformStream extends TransformStream<unknown, string> {
4488
- constructor();
4489
- }
4490
-
4491
- interface ChatTransport<MESSAGE_METADATA, DATA_TYPES extends UIDataTypes> {
4492
- submitMessages: (options: {
4493
- chatId: string;
4494
- messages: UIMessage<MESSAGE_METADATA, DATA_TYPES>[];
4495
- abortSignal: AbortSignal | undefined;
4496
- requestType: 'generate' | 'resume';
4497
- } & ChatRequestOptions) => Promise<ReadableStream<UIMessageStreamPart>>;
4498
- }
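
`ChatTransport` is the seam for swapping how messages reach a backend. A hedged sketch of a trivial in-memory transport that echoes the last submitted message back as UI message stream parts; a real transport would call a server:

```ts
import type { ChatTransport, UIDataTypes, UIMessageStreamPart } from 'ai';

class EchoTransport<METADATA> implements ChatTransport<METADATA, UIDataTypes> {
  async submitMessages(
    options: Parameters<ChatTransport<METADATA, UIDataTypes>['submitMessages']>[0],
  ): Promise<ReadableStream<UIMessageStreamPart>> {
    const last = options.messages.at(-1);

    return new ReadableStream<UIMessageStreamPart>({
      start(controller) {
        controller.enqueue({ type: 'start' });
        controller.enqueue({ type: 'text', text: `You said: ${JSON.stringify(last)}` });
        controller.enqueue({ type: 'finish' });
        controller.close();
      },
    });
  }
}
```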
4499
-
4500
- type ChatRequestOptions = {
4501
- /**
4502
- Additional headers that should be passed to the API endpoint.
4503
- */
4504
- headers?: Record<string, string> | Headers;
4505
- /**
4506
- Additional body JSON properties that should be sent to the API endpoint.
4507
- */
4508
- body?: object;
4509
- metadata?: unknown;
4510
- };
4511
- interface ChatSubscriber {
4512
- onChange: (event: ChatEvent) => void;
4513
- }
4514
- interface ChatEvent {
4515
- type: 'messages-changed' | 'status-changed';
4516
- }
4517
- type ChatStatus = 'submitted' | 'streaming' | 'ready' | 'error';
4518
- interface ChatState<MESSAGE_METADATA, DATA_TYPES extends UIDataTypes> {
4519
- status: ChatStatus;
4520
- error: Error | undefined;
4521
- messages: UIMessage<MESSAGE_METADATA, DATA_TYPES>[];
4522
- pushMessage: (message: UIMessage<MESSAGE_METADATA, DATA_TYPES>) => void;
4523
- popMessage: () => void;
4524
- replaceMessage: (index: number, message: UIMessage<MESSAGE_METADATA, DATA_TYPES>) => void;
4525
- snapshot: <T>(thing: T) => T;
4526
- }
4527
- interface ChatInit<MESSAGE_METADATA = unknown, UI_DATA_PART_SCHEMAS extends UIDataPartSchemas = UIDataPartSchemas> {
4528
- /**
4529
- * A unique identifier for the chat. If not provided, a random one will be
4530
- * generated.
4531
- */
4532
- id?: string;
4533
- messageMetadataSchema?: Validator<MESSAGE_METADATA> | StandardSchemaV1<MESSAGE_METADATA>;
4534
- dataPartSchemas?: UI_DATA_PART_SCHEMAS;
4535
- messages?: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[];
4536
- /**
4537
- * A way to provide a function that is going to be used for ids for messages and the chat.
4538
- * If not provided the default AI SDK `generateId` is used.
4539
- */
4540
- generateId?: IdGenerator;
4541
- transport?: ChatTransport<NoInfer<MESSAGE_METADATA>, NoInfer<InferUIDataParts<UI_DATA_PART_SCHEMAS>>>;
4542
- maxSteps?: number;
4543
- /**
4544
- * Callback function to be called when an error is encountered.
4545
- */
4546
- onError?: (error: Error) => void;
4547
- /**
4548
- Optional callback function that is invoked when a tool call is received.
4549
- Intended for automatic client-side tool execution.
4550
-
4551
- You can optionally return a result for the tool call,
4552
- either synchronously or asynchronously.
4553
- */
4554
- onToolCall?: ({ toolCall, }: {
4555
- toolCall: ToolCall<string, unknown>;
4556
- }) => void | Promise<unknown> | unknown;
4557
- /**
4558
- * Optional callback function that is called when the assistant message is finished streaming.
4559
- *
4560
- * @param message The message that was streamed.
4561
- */
4562
- onFinish?: (options: {
4563
- message: UIMessage<NoInfer<MESSAGE_METADATA>, NoInfer<InferUIDataParts<UI_DATA_PART_SCHEMAS>>>;
4564
- }) => void;
4565
- }
4566
- declare abstract class AbstractChat<MESSAGE_METADATA = unknown, UI_DATA_PART_SCHEMAS extends UIDataPartSchemas = UIDataPartSchemas> {
4567
- readonly id: string;
4568
- readonly generateId: IdGenerator;
4569
- protected state: ChatState<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>;
4570
- private readonly subscribers;
4571
- private messageMetadataSchema;
4572
- private dataPartSchemas;
4573
- private readonly transport;
4574
- private maxSteps;
4575
- private onError?;
4576
- private onToolCall?;
4577
- private onFinish?;
4578
- private activeResponse;
4579
- private jobExecutor;
4580
- constructor({ generateId, id, transport, maxSteps, messageMetadataSchema, dataPartSchemas, state, onError, onToolCall, onFinish, }: Omit<ChatInit<MESSAGE_METADATA, UI_DATA_PART_SCHEMAS>, 'messages'> & {
4581
- state: ChatState<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>;
4582
- });
4583
- /**
4584
- * Hook status:
4585
- *
4586
- * - `submitted`: The message has been sent to the API and we're awaiting the start of the response stream.
4587
- * - `streaming`: The response is actively streaming in from the API, receiving chunks of data.
4588
- * - `ready`: The full response has been received and processed; a new user message can be submitted.
4589
- * - `error`: An error occurred during the API request, preventing successful completion.
4590
- */
4591
- get status(): ChatStatus;
4592
- protected setStatus({ status, error, }: {
4593
- status: ChatStatus;
4594
- error?: Error;
4595
- }): void;
4596
- get error(): Error | undefined;
4597
- get messages(): UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[];
4598
- get lastMessage(): UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>> | undefined;
4599
- subscribe(subscriber: ChatSubscriber): () => void;
4600
- set messages(messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[]);
4601
- removeAssistantResponse: () => void;
4602
- /**
4603
- * Append a user message to the chat list. This triggers the API call to fetch
4604
- * the assistant's response.
4605
- */
4606
- sendMessage: (message: (CreateUIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>> & {
4607
- text?: never;
4608
- files?: never;
4609
- }) | {
4610
- text: string;
4611
- files?: FileList | FileUIPart[];
4612
- metadata?: MESSAGE_METADATA;
4613
- parts?: never;
4614
- } | {
4615
- files: FileList | FileUIPart[];
4616
- metadata?: MESSAGE_METADATA;
4617
- parts?: never;
4618
- }, options?: ChatRequestOptions) => Promise<void>;
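
Of the accepted shapes above, the plain text form is the most common. A sketch against an `AbstractChat` instance (the concrete subclass would come from a UI framework binding; the header name is illustrative):

```ts
import type { AbstractChat } from 'ai';

declare const chat: AbstractChat; // assumed concrete subclass instance

// Plain text; the UI message parts are assembled for you and the request is triggered.
await chat.sendMessage({ text: 'Summarize this file, please.' });

// Extra per-request options ride along via ChatRequestOptions.
await chat.sendMessage(
  { text: 'And now as a haiku.' },
  { headers: { 'x-request-source': 'docs-example' } },
);
```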
4619
- /**
4620
- * Regenerate the last assistant message.
4621
- */
4622
- reload: (options?: ChatRequestOptions) => Promise<void>;
4623
- /**
4624
- * Resume an ongoing chat generation stream. This does not resume an aborted generation.
4625
- */
4626
- experimental_resume: (options?: ChatRequestOptions) => Promise<void>;
4627
- addToolResult: ({ toolCallId, result, }: {
4628
- toolCallId: string;
4629
- result: unknown;
4630
- }) => Promise<void>;
4631
- /**
4632
- * Abort the current request immediately, keep the generated tokens if any.
4633
- */
4634
- stop: () => Promise<void>;
4635
- private emit;
4636
- private triggerRequest;
4712
+ textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): EmbeddingModelV2<string>;
4713
+ imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['imageModel']>>[0]>}` : never): ImageModelV2;
4714
+ imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): ImageModelV2;
4637
4715
  }
4638
-
4639
- declare function convertFileListToFileUIParts(files: FileList | undefined): Promise<Array<FileUIPart>>;
4640
-
4641
4716
  /**
4642
- Converts an array of messages from useChat into an array of CoreMessages that can be used
4643
- with the AI core functions (e.g. `streamText`).
4717
+ * Creates a registry for the given providers.
4644
4718
  */
4645
- declare function convertToModelMessages<TOOLS extends ToolSet = never>(messages: Array<Omit<UIMessage, 'id'>>, options?: {
4646
- tools?: TOOLS;
4647
- }): ModelMessage[];
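
`convertToModelMessages` (with `convertToCoreMessages` kept as a deprecated alias) bridges persisted UI messages back into model messages. A sketch that also passes a tool set so tool parts round-trip with their types (both values are placeholders):

```ts
import { convertToModelMessages, type ToolSet, type UIMessage } from 'ai';

declare const tools: ToolSet;           // assumed: the same tools passed to streamText/generateText
declare const uiMessages: UIMessage[];  // e.g. loaded from storage alongside the chat

// Tool-related UI parts are mapped back to typed tool calls/results when `tools` is provided.
const modelMessages = convertToModelMessages(uiMessages, { tools });
```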
4719
+ declare function createProviderRegistry<PROVIDERS extends Record<string, ProviderV2>, SEPARATOR extends string = ':'>(providers: PROVIDERS, { separator, }?: {
4720
+ separator?: SEPARATOR;
4721
+ }): ProviderRegistryProvider<PROVIDERS, SEPARATOR>;
4648
4722
  /**
4649
- @deprecated Use `convertToModelMessages` instead.
4723
+ * @deprecated Use `createProviderRegistry` instead.
4650
4724
  */
4651
- declare const convertToCoreMessages: typeof convertToModelMessages;
4652
-
4653
- type PrepareRequest<MESSAGE_METADATA, DATA_TYPES extends UIDataTypes> = (options: {
4654
- id: string;
4655
- messages: UIMessage<MESSAGE_METADATA, DATA_TYPES>[];
4656
- requestMetadata: unknown;
4657
- body: Record<string, any> | undefined;
4658
- credentials: RequestCredentials | undefined;
4659
- headers: HeadersInit | undefined;
4660
- }) => {
4661
- body: object;
4662
- headers?: HeadersInit;
4663
- credentials?: RequestCredentials;
4664
- };
4665
-
4666
- declare class DefaultChatTransport<MESSAGE_METADATA, DATA_TYPES extends UIDataTypes> implements ChatTransport<MESSAGE_METADATA, DATA_TYPES> {
4667
- private api;
4668
- private credentials?;
4669
- private headers?;
4670
- private body?;
4671
- private fetch?;
4672
- private prepareRequest?;
4673
- constructor({ api, credentials, headers, body, fetch, prepareRequest, }?: {
4674
- api?: string;
4675
- /**
4676
- * The credentials mode to be used for the fetch request.
4677
- * Possible values are: 'omit', 'same-origin', 'include'.
4678
- * Defaults to 'same-origin'.
4679
- */
4680
- credentials?: RequestCredentials;
4681
- /**
4682
- * HTTP headers to be sent with the API request.
4683
- */
4684
- headers?: Record<string, string> | Headers;
4685
- /**
4686
- * Extra body object to be sent with the API request.
4687
- * @example
4688
- * Send a `sessionId` to the API along with the messages.
4689
- * ```js
4690
- * useChat({
4691
- * body: {
4692
- * sessionId: '123',
4693
- * }
4694
- * })
4695
- * ```
4696
- */
4697
- body?: object;
4698
- /**
4699
- Custom fetch implementation. You can use it as a middleware to intercept requests,
4700
- or to provide a custom fetch implementation for e.g. testing.
4701
- */
4702
- fetch?: FetchFunction;
4703
- /**
4704
- * When a function is provided, it will be used
4705
- * to prepare the request body for the chat API. This can be useful for
4706
- * customizing the request body based on the messages and data in the chat.
4707
- *
4708
- * @param id The id of the chat.
4709
- * @param messages The current messages in the chat.
4710
- * @param requestBody The request body object passed in the chat request.
4711
- */
4712
- prepareRequest?: PrepareRequest<MESSAGE_METADATA, DATA_TYPES>;
4713
- });
4714
- submitMessages({ chatId, messages, abortSignal, metadata, headers, body, requestType, }: Parameters<ChatTransport<MESSAGE_METADATA, DATA_TYPES>['submitMessages']>[0]): Promise<ReadableStream<UIMessageStreamPart>>;
4715
- }
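
`DefaultChatTransport` keeps the familiar `api`/`headers`/`body`/`fetch` options and adds `prepareRequest` for last-mile control over the outgoing payload. A sketch with illustrative endpoint and body fields (explicit type arguments are shown because the class declares no defaults for its generics):

```ts
import { DefaultChatTransport, type UIDataTypes } from 'ai';

const transport = new DefaultChatTransport<unknown, UIDataTypes>({
  api: '/api/chat',
  headers: { 'x-app-version': '1.2.3' },
  body: { sessionId: 'session-123' },
  // Shape the request body right before it is sent.
  prepareRequest: ({ id, messages, body }) => ({
    body: {
      chatId: id,
      // Only send the last message; the server can look up the history.
      messages: messages.slice(-1),
      ...body,
    },
  }),
});
```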
4716
-
4717
- declare function getToolInvocations(message: UIMessage): ToolInvocation[];
4725
+ declare const experimental_createProviderRegistry: typeof createProviderRegistry;
4718
4726
 
4719
- declare class TextStreamChatTransport<MESSAGE_METADATA, DATA_TYPES extends UIDataTypes> implements ChatTransport<MESSAGE_METADATA, DATA_TYPES> {
4720
- private api;
4721
- private credentials?;
4722
- private headers?;
4723
- private body?;
4724
- private fetch?;
4725
- private prepareRequest?;
4726
- constructor({ api, credentials, headers, body, fetch, prepareRequest, }: {
4727
- api: string;
4728
- /**
4729
- * The credentials mode to be used for the fetch request.
4730
- * Possible values are: 'omit', 'same-origin', 'include'.
4731
- * Defaults to 'same-origin'.
4732
- */
4733
- credentials?: RequestCredentials;
4727
+ /**
4728
+ The result of a `transcribe` call.
4729
+ It contains the transcript and additional information.
4730
+ */
4731
+ interface TranscriptionResult {
4732
+ /**
4733
+ * The complete transcribed text from the audio.
4734
+ */
4735
+ readonly text: string;
4736
+ /**
4737
+ * Array of transcript segments with timing information.
4738
+ * Each segment represents a portion of the transcribed text with start and end times.
4739
+ */
4740
+ readonly segments: Array<{
4734
4741
  /**
4735
- * HTTP headers to be sent with the API request.
4742
+ * The text content of this segment.
4736
4743
  */
4737
- headers?: Record<string, string> | Headers;
4744
+ readonly text: string;
4738
4745
  /**
4739
- * Extra body object to be sent with the API request.
4740
- * @example
4741
- * Send a `sessionId` to the API along with the messages.
4742
- * ```js
4743
- * useChat({
4744
- * body: {
4745
- * sessionId: '123',
4746
- * }
4747
- * })
4748
- * ```
4746
+ * The start time of this segment in seconds.
4749
4747
  */
4750
- body?: object;
4751
- /**
4752
- Custom fetch implementation. You can use it as a middleware to intercept requests,
4753
- or to provide a custom fetch implementation for e.g. testing.
4754
- */
4755
- fetch?: FetchFunction;
4748
+ readonly startSecond: number;
4756
4749
  /**
4757
- * When a function is provided, it will be used
4758
- * to prepare the request body for the chat API. This can be useful for
4759
- * customizing the request body based on the messages and data in the chat.
4760
- *
4761
- * @param id The id of the chat.
4762
- * @param messages The current messages in the chat.
4763
- * @param requestBody The request body object passed in the chat request.
4750
+ * The end time of this segment in seconds.
4764
4751
  */
4765
- prepareRequest?: NoInfer<PrepareRequest<MESSAGE_METADATA, DATA_TYPES>>;
4766
- });
4767
- submitMessages({ chatId, messages, abortSignal, metadata, headers, body, requestType, }: Parameters<ChatTransport<MESSAGE_METADATA, DATA_TYPES>['submitMessages']>[0]): Promise<ReadableStream<UIMessageStreamPart>>;
4768
- }
4769
-
4770
- type CompletionRequestOptions = {
4771
- /**
4772
- An optional object of headers to be passed to the API endpoint.
4773
- */
4774
- headers?: Record<string, string> | Headers;
4775
- /**
4776
- An optional object to be passed to the API endpoint.
4777
- */
4778
- body?: object;
4779
- };
4780
- type UseCompletionOptions = {
4752
+ readonly endSecond: number;
4753
+ }>;
4781
4754
  /**
4782
- * The API endpoint that accepts a `{ prompt: string }` object and returns
4783
- * a stream of tokens of the AI completion response. Defaults to `/api/completion`.
4755
+ * The detected language of the audio content, as an ISO-639-1 code (e.g., 'en' for English).
4756
+ * May be undefined if the language couldn't be detected.
4784
4757
  */
4785
- api?: string;
4758
+ readonly language: string | undefined;
4786
4759
  /**
4787
- * A unique identifier for the chat. If not provided, a random one will be
4788
- * generated. When provided, the `useChat` hook with the same `id` will
4789
- * have shared states across components.
4760
+ * The total duration of the audio file in seconds.
4761
+ * May be undefined if the duration couldn't be determined.
4790
4762
  */
4791
- id?: string;
4763
+ readonly durationInSeconds: number | undefined;
4792
4764
  /**
4793
- * Initial prompt input of the completion.
4794
- */
4795
- initialInput?: string;
4765
+ Warnings for the call, e.g. unsupported settings.
4766
+ */
4767
+ readonly warnings: Array<TranscriptionWarning>;
4796
4768
  /**
4797
- * Initial completion result. Useful to load an existing history.
4769
+ Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
4798
4770
  */
4799
- initialCompletion?: string;
4771
+ readonly responses: Array<TranscriptionModelResponseMetadata>;
4800
4772
  /**
4801
- * Callback function to be called when the completion is finished streaming.
4773
+ Provider metadata from the provider.
4802
4774
  */
4803
- onFinish?: (prompt: string, completion: string) => void;
4775
+ readonly providerMetadata: Record<string, Record<string, JSONValue$1>>;
4776
+ }
4777
+
4778
+ /**
4779
+ Generates transcripts using a transcription model.
4780
+
4781
+ @param model - The transcription model to use.
4782
+ @param audio - The audio data to transcribe as DataContent (string | Uint8Array | ArrayBuffer | Buffer) or a URL.
4783
+ @param providerOptions - Additional provider-specific options that are passed through to the provider
4784
+ as body parameters.
4785
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
4786
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
4787
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
4788
+
4789
+ @returns A result object that contains the generated transcript.
4790
+ */
4791
+ declare function transcribe({ model, audio, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
4804
4792
  /**
4805
- * Callback function to be called when an error is encountered.
4806
- */
4807
- onError?: (error: Error) => void;
4793
+ The transcription model to use.
4794
+ */
4795
+ model: TranscriptionModelV1;
4808
4796
  /**
4809
- * The credentials mode to be used for the fetch request.
4810
- * Possible values are: 'omit', 'same-origin', 'include'.
4811
- * Defaults to 'same-origin'.
4797
+ The audio data to transcribe.
4812
4798
  */
4813
- credentials?: RequestCredentials;
4799
+ audio: DataContent | URL;
4814
4800
  /**
4815
- * HTTP headers to be sent with the API request.
4816
- */
4817
- headers?: Record<string, string> | Headers;
4801
+ Additional provider-specific options that are passed through to the provider
4802
+ as body parameters.
4803
+
4804
+ The outer record is keyed by the provider name, and the inner
4805
+ record is keyed by the provider-specific metadata key.
4806
+ ```ts
4807
+ {
4808
+ "openai": {
4809
+ "temperature": 0
4810
+ }
4811
+ }
4812
+ ```
4813
+ */
4814
+ providerOptions?: ProviderOptions;
4818
4815
  /**
4819
- * Extra body object to be sent with the API request.
4820
- * @example
4821
- * Send a `sessionId` to the API along with the prompt.
4822
- * ```js
4823
- * useChat({
4824
- * body: {
4825
- * sessionId: '123',
4826
- * }
4827
- * })
4828
- * ```
4816
+ Maximum number of retries per transcript model call. Set to 0 to disable retries.
4817
+
4818
+ @default 2
4829
4819
  */
4830
- body?: object;
4820
+ maxRetries?: number;
4831
4821
  /**
4832
- Streaming protocol that is used. Defaults to `data`.
4833
- */
4834
- streamProtocol?: 'data' | 'text';
4822
+ Abort signal.
4823
+ */
4824
+ abortSignal?: AbortSignal;
4835
4825
  /**
4836
- Custom fetch implementation. You can use it as a middleware to intercept requests,
4837
- or to provide a custom fetch implementation for e.g. testing.
4838
- */
4839
- fetch?: FetchFunction;
4840
- };
4826
+ Additional headers to include in the request.
4827
+ Only applicable for HTTP-based providers.
4828
+ */
4829
+ headers?: Record<string, string>;
4830
+ }): Promise<TranscriptionResult>;
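
A usage sketch for `experimental_transcribe` with a placeholder transcription model and a local file; the file name and retry count are illustrative, and any `TranscriptionModel` instance works:

```ts
import { experimental_transcribe as transcribe, type TranscriptionModel } from 'ai';
import { readFile } from 'node:fs/promises';

declare const transcriptionModel: TranscriptionModel; // assumed provider transcription model

const { text, segments, language, durationInSeconds } = await transcribe({
  model: transcriptionModel,
  audio: await readFile('meeting.mp3'), // DataContent (Buffer/Uint8Array/base64 string) or a URL
  maxRetries: 1,
});

console.log(text);
for (const segment of segments) {
  console.log(`[${segment.startSecond}s - ${segment.endSecond}s] ${segment.text}`);
}
console.log({ language, durationInSeconds });
```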
4841
4831
 
4842
- export { AbstractChat, AssistantContent, AssistantModelMessage, CallSettings, CallWarning, ChatEvent, ChatInit, ChatRequestOptions, ChatState, ChatStatus, ChatTransport, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataContent, DataUIPart, DeepPartial, DefaultChatTransport, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GLOBAL_DEFAULT_PROVIDER, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, ImagePart, InferUIDataParts, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MCPClientError, MCPTransport, MessageConversionError, ModelMessage, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, PrepareStepFunction, PrepareStepResult, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RetryError, SerialJobExecutor, SourceUrlUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, SystemModelMessage, TelemetrySettings, TextPart, TextStreamChatTransport, TextStreamPart, TextUIPart, Tool, ToolCallOptions, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolInvocation, ToolInvocationUIPart, ToolModelMessage, ToolResultPart, ToolResultUnion, ToolSet, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIDataPartSchemas, UIDataTypes, UIMessage, UIMessagePart, UIMessageStreamOptions, UIMessageStreamPart, UIMessageStreamWriter, UseCompletionOptions, UserContent, UserModelMessage, assistantModelMessageSchema, callCompletionApi, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractReasoningMiddleware, generateObject, generateText, getTextFromDataUrl, getToolInvocations, hasToolCall, isDeepEqualData, modelMessageSchema, parsePartialJson, pipeTextStreamToResponse, pipeUIMessageStreamToResponse, simulateReadableStream, simulateStreamingMiddleware, smoothStream, stepCountIs, streamObject, streamText, systemModelMessageSchema, tool, toolModelMessageSchema, userModelMessageSchema, wrapLanguageModel };
4832
+ export { AbstractChat, AssistantContent, AssistantModelMessage, CallSettings, CallWarning, ChatInit, ChatRequestOptions, ChatState, ChatStatus, ChatTransport, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataContent, DataUIPart, DeepPartial, DefaultChatTransport, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GLOBAL_DEFAULT_PROVIDER, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, ImagePart, InferUIDataParts, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MCPClientError, MCPTransport, MessageConversionError, ModelMessage, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, PrepareStepFunction, PrepareStepResult, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RetryError, SerialJobExecutor, SourceUrlUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, SystemModelMessage, TelemetrySettings, TextPart, TextStreamChatTransport, TextStreamPart, TextUIPart, Tool, ToolCallOptions, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolModelMessage, ToolResultPart, ToolResultUnion, ToolSet, ToolUIPart, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIDataPartSchemas, UIDataTypes, UIMessage, UIMessagePart, UIMessageStreamOptions, UIMessageStreamPart, UIMessageStreamWriter, UseCompletionOptions, UserContent, UserModelMessage, assistantModelMessageSchema, callCompletionApi, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractReasoningMiddleware, generateObject, generateText, getTextFromDataUrl, getToolName, hasToolCall, isDeepEqualData, isToolUIPart, modelMessageSchema, parsePartialJson, pipeTextStreamToResponse, pipeUIMessageStreamToResponse, simulateReadableStream, simulateStreamingMiddleware, smoothStream, stepCountIs, streamObject, streamText, systemModelMessageSchema, tool, toolModelMessageSchema, userModelMessageSchema, wrapLanguageModel };