ai 5.0.0-alpha.1 → 5.0.0-alpha.10

This diff compares the publicly released contents of the two package versions as published to their registry and is provided for informational purposes only.
package/dist/index.d.ts CHANGED
@@ -1,7 +1,9 @@
1
- import { ToolResultContent, Schema, ToolCall, ToolResult, IdGenerator, FetchFunction, Validator, StandardSchemaV1 } from '@ai-sdk/provider-utils';
1
+ import { ToolResultContent, Schema, ToolCall, ToolResult, Validator, StandardSchemaV1, IdGenerator, InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
2
2
  export { IdGenerator, Schema, ToolCall, ToolResult, asSchema, createIdGenerator, generateId, jsonSchema } from '@ai-sdk/provider-utils';
3
3
  import { AISDKError, SharedV2ProviderMetadata, SharedV2ProviderOptions, EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV2, ImageModelV2CallWarning, ImageModelV2ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2CallWarning, LanguageModelV2Source, SpeechModelV1, SpeechModelV1CallWarning, TranscriptionModelV1, TranscriptionModelV1CallWarning, LanguageModelV2Usage, JSONObject, LanguageModelV2ToolCall, JSONSchema7, LanguageModelV2CallOptions, JSONParseError, TypeValidationError, LanguageModelV2Middleware, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
4
4
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
5
+ import * as z3 from 'zod/v3';
6
+ import * as z4 from 'zod/v4/core';
5
7
  import { z } from 'zod';
6
8
  import { ServerResponse } from 'node:http';
7
9
  import { AttributeValue, Tracer } from '@opentelemetry/api';
@@ -382,7 +384,7 @@ type JSONValue = JSONValue$1;
382
384
  /**
383
385
  Language model that is used by the AI SDK Core functions.
384
386
  */
385
- type LanguageModel = LanguageModelV2;
387
+ type LanguageModel = string | LanguageModelV2;
386
388
  /**
387
389
  Reason why a language model finished generating a response.
388
390
 
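With `LanguageModel` widened to `string | LanguageModelV2`, the `model` option of the core functions can also be a plain model id that is resolved through a default provider (see the `GLOBAL_DEFAULT_PROVIDER` symbol added further down). A minimal sketch; the id value and the way it is resolved are illustrative assumptions, not taken from this diff:

```ts
import { generateText } from 'ai';

async function main() {
  const { text } = await generateText({
    // A bare string now satisfies `LanguageModel`; previously a
    // LanguageModelV2 instance was required.
    model: 'openai/gpt-4o-mini', // hypothetical model id
    prompt: 'Say hello.',
  });
  console.log(text);
}
```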
@@ -878,7 +880,7 @@ type MCPTransportConfig = {
878
880
  headers?: Record<string, string>;
879
881
  };
880
882
 
881
- type ToolParameters<T = JSONObject> = z.Schema<T> | Schema<T>;
883
+ type ToolParameters<T = JSONObject> = z4.$ZodType<T> | z3.Schema<T> | Schema<T>;
882
884
  interface ToolExecutionOptions {
883
885
  /**
884
886
  * The ID of the tool call. You can use it e.g. when sending tool-call related information with stream data.
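`ToolParameters` now accepts Zod v4 schemas (`z4.$ZodType`) alongside Zod v3 schemas and the SDK's own `Schema`. A type-level sketch of an MCP-style tool schema map; it assumes a `zod` version that ships the `zod/v4` subpath, and the tool names are illustrative:

```ts
import { z } from 'zod';      // Zod v3-style builder
import * as z4 from 'zod/v4'; // Zod v4 builder (assumes zod >= 3.25)

// Shape matching `ToolSchemas`: Record<string, { parameters: ToolParameters }>
const toolSchemas = {
  'get-weather': {
    parameters: z4.object({ city: z4.string() }), // matches the z4.$ZodType branch
  },
  'get-time': {
    parameters: z.object({ timezone: z.string() }), // matches the z3.Schema branch
  },
};
```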
@@ -960,7 +962,7 @@ type ToolSchemas = Record<string, {
960
962
  parameters: ToolParameters<JSONObject | unknown>;
961
963
  }> | 'automatic' | undefined;
962
964
  type McpToolSet<TOOL_SCHEMAS extends ToolSchemas = 'automatic'> = TOOL_SCHEMAS extends Record<string, {
963
- parameters: ToolParameters<any>;
965
+ parameters: ToolParameters<unknown>;
964
966
  }> ? {
965
967
  [K in keyof TOOL_SCHEMAS]: MappedTool<TOOL_SCHEMAS[K], CallToolResult> & Required<Pick<MappedTool<TOOL_SCHEMAS[K], CallToolResult>, 'execute'>>;
966
968
  } : McpToolSet<Record<string, {
@@ -1900,6 +1902,8 @@ type Prompt = {
1900
1902
  messages?: Array<ModelMessage>;
1901
1903
  };
1902
1904
 
1905
+ declare const GLOBAL_DEFAULT_PROVIDER: unique symbol;
1906
+
1903
1907
  /**
1904
1908
  * A function that attempts to repair a tool call that failed to parse.
1905
1909
  *
@@ -2114,13 +2118,10 @@ It is optional for backwards compatibility.
2114
2118
  */
2115
2119
  type ToolInvocation = ({
2116
2120
  state: 'partial-call';
2117
- step?: number;
2118
2121
  } & ToolCall<string, any>) | ({
2119
2122
  state: 'call';
2120
- step?: number;
2121
2123
  } & ToolCall<string, any>) | ({
2122
2124
  state: 'result';
2123
- step?: number;
2124
2125
  } & ToolResult<string, any, any>);
2125
2126
  /**
2126
2127
  The data types that can be used in the UI message for the UI message data parts.
@@ -2154,7 +2155,7 @@ interface UIMessage<METADATA = unknown, DATA_PARTS extends UIDataTypes = UIDataT
2154
2155
  */
2155
2156
  parts: Array<UIMessagePart<DATA_PARTS>>;
2156
2157
  }
2157
- type UIMessagePart<DATA_TYPES extends UIDataTypes> = TextUIPart | ReasoningUIPart | ToolInvocationUIPart | SourceUIPart | FileUIPart | DataUIPart<DATA_TYPES> | StepStartUIPart;
2158
+ type UIMessagePart<DATA_TYPES extends UIDataTypes> = TextUIPart | ReasoningUIPart | ToolInvocationUIPart | SourceUrlUIPart | FileUIPart | DataUIPart<DATA_TYPES> | StepStartUIPart;
2158
2159
  type DataUIPart<DATA_TYPES extends UIDataTypes> = ValueOf<{
2159
2160
  [NAME in keyof DATA_TYPES & string]: {
2160
2161
  type: `data-${NAME}`;
@@ -2162,6 +2163,10 @@ type DataUIPart<DATA_TYPES extends UIDataTypes> = ValueOf<{
2162
2163
  data: DATA_TYPES[NAME];
2163
2164
  };
2164
2165
  }>;
2166
+ type UIDataPartSchemas = Record<string, Validator<any> | StandardSchemaV1<any>>;
2167
+ type InferUIDataParts<T extends UIDataPartSchemas> = {
2168
+ [K in keyof T]: T[K] extends Validator<infer U> ? U : T[K] extends StandardSchemaV1<infer U> ? U : unknown;
2169
+ };
2165
2170
  /**
2166
2171
  * A text part of a message.
2167
2172
  */
@@ -2199,18 +2204,12 @@ type ToolInvocationUIPart = {
2199
2204
  /**
2200
2205
  * A source part of a message.
2201
2206
  */
2202
- type SourceUIPart = {
2203
- type: 'source';
2204
- /**
2205
- * The source.
2206
- */
2207
- source: {
2208
- sourceType: 'url';
2209
- id: string;
2210
- url: string;
2211
- title?: string;
2212
- providerMetadata?: Record<string, any>;
2213
- };
2207
+ type SourceUrlUIPart = {
2208
+ type: 'source-url';
2209
+ sourceId: string;
2210
+ url: string;
2211
+ title?: string;
2212
+ providerMetadata?: Record<string, any>;
2214
2213
  };
2215
2214
  /**
2216
2215
  * A file part of a message.
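The nested `source` part becomes a flat `source-url` part. A sketch of consuming the new shape when walking `message.parts`; the function and label format are illustrative, and it assumes `UIMessage` is exported from the package root:

```ts
import type { UIMessage } from 'ai';

function collectSourceLabels(message: UIMessage): string[] {
  const labels: string[] = [];
  for (const part of message.parts) {
    // was: part.type === 'source' with a nested part.source object
    if (part.type === 'source-url') {
      labels.push(`${part.title ?? part.url} (${part.sourceId})`);
    }
  }
  return labels;
}
```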
@@ -2239,8 +2238,9 @@ type FileUIPart = {
2239
2238
  type StepStartUIPart = {
2240
2239
  type: 'step-start';
2241
2240
  };
2242
- type CreateUIMessage<METADATA = unknown, DATA_TYPES extends UIDataTypes = UIDataTypes> = Omit<UIMessage<METADATA, DATA_TYPES>, 'id'> & {
2241
+ type CreateUIMessage<METADATA = unknown, DATA_TYPES extends UIDataTypes = UIDataTypes> = Omit<UIMessage<METADATA, DATA_TYPES>, 'id' | 'role'> & {
2243
2242
  id?: UIMessage<METADATA, DATA_TYPES>['id'];
2243
+ role?: UIMessage<METADATA, DATA_TYPES>['role'];
2244
2244
  };
2245
2245
 
2246
2246
  declare const symbol$3: unique symbol;
@@ -2294,15 +2294,22 @@ declare function pipeTextStreamToResponse({ response, status, statusText, header
2294
2294
  textStream: ReadableStream<string>;
2295
2295
  } & ResponseInit): void;
2296
2296
 
2297
- /**
2298
- * Appends a client message to the messages array.
2299
- * If the last message in the array has the same id as the new message, it will be replaced.
2300
- * Otherwise, the new message will be appended.
2301
- */
2302
- declare function appendClientMessage({ messages, message, }: {
2303
- messages: UIMessage[];
2304
- message: UIMessage;
2305
- }): UIMessage<unknown, UIDataTypes>[];
2297
+ declare const getOriginalFetch: () => typeof fetch;
2298
+ declare function callCompletionApi({ api, prompt, credentials, headers, body, streamProtocol, setCompletion, setLoading, setError, setAbortController, onFinish, onError, fetch, }: {
2299
+ api: string;
2300
+ prompt: string;
2301
+ credentials: RequestCredentials | undefined;
2302
+ headers: HeadersInit | undefined;
2303
+ body: Record<string, any>;
2304
+ streamProtocol: 'data' | 'text' | undefined;
2305
+ setCompletion: (completion: string) => void;
2306
+ setLoading: (loading: boolean) => void;
2307
+ setError: (error: Error | undefined) => void;
2308
+ setAbortController: (abortController: AbortController | null) => void;
2309
+ onFinish: ((prompt: string, completion: string) => void) | undefined;
2310
+ onError: ((error: Error) => void) | undefined;
2311
+ fetch: ReturnType<typeof getOriginalFetch> | undefined;
2312
+ }): Promise<string | null | undefined>;
2306
2313
 
2307
2314
  /**
2308
2315
  The result of an `embed` call.
@@ -2658,7 +2665,7 @@ It always recurses into arrays.
2658
2665
 
2659
2666
  Adopted from [type-fest](https://github.com/sindresorhus/type-fest/tree/main) PartialDeep.
2660
2667
  */
2661
- type DeepPartial<T> = T extends z.ZodTypeAny ? DeepPartialInternal<z.infer<T>> : DeepPartialInternal<T>;
2668
+ type DeepPartial<T> = T extends z3.ZodTypeAny ? DeepPartialInternal<z3.infer<T>> : T extends z4.$ZodType ? DeepPartialInternal<z4.infer<T>> : DeepPartialInternal<T>;
2662
2669
  type DeepPartialInternal<T> = T extends null | undefined | string | number | boolean | symbol | bigint | void | Date | RegExp | ((...arguments_: any[]) => unknown) | (new (...arguments_: any[]) => unknown) ? T : T extends Map<infer KeyType, infer ValueType> ? PartialMap<KeyType, ValueType> : T extends Set<infer ItemType> ? PartialSet<ItemType> : T extends ReadonlyMap<infer KeyType, infer ValueType> ? PartialReadonlyMap<KeyType, ValueType> : T extends ReadonlySet<infer ItemType> ? PartialReadonlySet<ItemType> : T extends object ? T extends ReadonlyArray<infer ItemType> ? ItemType[] extends T ? readonly ItemType[] extends T ? ReadonlyArray<DeepPartialInternal<ItemType | undefined>> : Array<DeepPartialInternal<ItemType | undefined>> : PartialObject<T> : PartialObject<T> : unknown;
2663
2670
  type PartialMap<KeyType, ValueType> = {} & Map<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
2664
2671
  type PartialSet<T> = {} & Set<DeepPartialInternal<T>>;
@@ -2668,7 +2675,7 @@ type PartialObject<ObjectType extends object> = {
2668
2675
  [KeyType in keyof ObjectType]?: DeepPartialInternal<ObjectType[KeyType]>;
2669
2676
  };
2670
2677
 
2671
- interface Output$1<OUTPUT, PARTIAL> {
2678
+ interface Output<OUTPUT, PARTIAL> {
2672
2679
  readonly type: 'object' | 'text';
2673
2680
  responseFormat: LanguageModelV2CallOptions['responseFormat'];
2674
2681
  parsePartial(options: {
@@ -2684,25 +2691,49 @@ interface Output$1<OUTPUT, PARTIAL> {
2684
2691
  finishReason: FinishReason;
2685
2692
  }): Promise<OUTPUT>;
2686
2693
  }
2687
- declare const text: () => Output$1<string, string>;
2694
+ declare const text: () => Output<string, string>;
2688
2695
  declare const object: <OUTPUT>({ schema: inputSchema, }: {
2689
- schema: z.Schema<OUTPUT, z.ZodTypeDef, any> | Schema<OUTPUT>;
2690
- }) => Output$1<OUTPUT, DeepPartial<OUTPUT>>;
2696
+ schema: z4.$ZodType<OUTPUT, any> | z3.Schema<OUTPUT, z3.ZodTypeDef, any> | Schema<OUTPUT>;
2697
+ }) => Output<OUTPUT, DeepPartial<OUTPUT>>;
2691
2698
 
2699
+ type output_Output<OUTPUT, PARTIAL> = Output<OUTPUT, PARTIAL>;
2692
2700
  declare const output_object: typeof object;
2693
2701
  declare const output_text: typeof text;
2694
2702
  declare namespace output {
2695
2703
  export {
2696
- Output$1 as Output,
2704
+ output_Output as Output,
2697
2705
  output_object as object,
2698
2706
  output_text as text,
2699
2707
  };
2700
2708
  }
2701
2709
 
2710
+ /**
2711
+ Function that you can use to provide different settings for a step.
2712
+
2713
+ @param options - The options for the step.
2714
+ @param options.steps - The steps that have been executed so far.
2715
+ @param options.stepNumber - The number of the step that is being executed.
2716
+ @param options.model - The model that is being used.
2717
+
2718
+ @returns An object that contains the settings for the step.
2719
+ If you return undefined (or for undefined settings), the settings from the outer level will be used.
2720
+ */
2721
+ type PrepareStepFunction<TOOLS extends Record<string, Tool> = Record<string, Tool>> = (options: {
2722
+ steps: Array<StepResult<NoInfer<TOOLS>>>;
2723
+ stepNumber: number;
2724
+ model: LanguageModel;
2725
+ }) => PromiseLike<PrepareStepResult<TOOLS>> | PrepareStepResult<TOOLS>;
2726
+ type PrepareStepResult<TOOLS extends Record<string, Tool> = Record<string, Tool>> = {
2727
+ model?: LanguageModel;
2728
+ toolChoice?: ToolChoice<NoInfer<TOOLS>>;
2729
+ activeTools?: Array<keyof NoInfer<TOOLS>>;
2730
+ system?: string;
2731
+ } | undefined;
2732
+
2702
2733
  type StopCondition<TOOLS extends ToolSet> = (options: {
2703
2734
  steps: Array<StepResult<TOOLS>>;
2704
2735
  }) => PromiseLike<boolean> | boolean;
2705
- declare function maxSteps(maxSteps: number): StopCondition<any>;
2736
+ declare function stepCountIs(stepCount: number): StopCondition<any>;
2706
2737
  declare function hasToolCall(toolName: string): StopCondition<any>;
2707
2738
 
2708
2739
  /**
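`maxSteps(n)` is replaced by composable stop conditions: `stepCountIs(n)`, `hasToolCall(name)`, or any function matching `StopCondition`. A sketch of combining them; it assumes the helpers and the `StopCondition`/`ToolSet` types are exported from the package root, and the tool name is illustrative:

```ts
import { stepCountIs, hasToolCall, type StopCondition, type ToolSet } from 'ai';

// Custom condition using only the step count from the executed steps.
const tooManySteps: StopCondition<ToolSet> = ({ steps }) => steps.length >= 10;

// When an array is passed as `stopWhen`, meeting any one condition stops generation.
const stopWhen = [stepCountIs(5), hasToolCall('finalAnswer'), tooManySteps];
```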
@@ -2757,7 +2788,7 @@ If set and supported by the model, calls will generate deterministic results.
2757
2788
  @returns
2758
2789
  A result object that contains the generated text, the results of the tool calls, and additional information.
2759
2790
  */
2760
- declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, continueUntil, experimental_output: output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools: activeTools, experimental_prepareStep: prepareStep, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
2791
+ declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model: modelArg, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, stopWhen, experimental_output: output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools, activeTools, experimental_prepareStep, prepareStep, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
2761
2792
  /**
2762
2793
  The language model to use.
2763
2794
  */
@@ -2770,7 +2801,13 @@ The tools that the model can call. The model needs to support calling tools.
2770
2801
  The tool choice strategy. Default: 'auto'.
2771
2802
  */
2772
2803
  toolChoice?: ToolChoice<NoInfer<TOOLS>>;
2773
- continueUntil?: StopCondition<NoInfer<TOOLS>>;
2804
+ /**
2805
+ Condition for stopping the generation when there are tool results in the last step.
2806
+ When the condition is an array, any of the conditions can be met to stop the generation.
2807
+
2808
+ @default stepCountIs(1)
2809
+ */
2810
+ stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
2774
2811
  /**
2775
2812
  Optional telemetry configuration (experimental).
2776
2813
  */
@@ -2782,34 +2819,26 @@ functionality that can be fully encapsulated in the provider.
2782
2819
  */
2783
2820
  providerOptions?: ProviderOptions;
2784
2821
  /**
2822
+ * @deprecated Use `activeTools` instead.
2823
+ */
2824
+ experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
2825
+ /**
2785
2826
  Limits the tools that are available for the model to call without
2786
2827
  changing the tool call and result types in the result.
2787
2828
  */
2788
- experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
2829
+ activeTools?: Array<keyof NoInfer<TOOLS>>;
2789
2830
  /**
2790
2831
  Optional specification for parsing structured outputs from the LLM response.
2791
2832
  */
2792
- experimental_output?: Output$1<OUTPUT, OUTPUT_PARTIAL>;
2833
+ experimental_output?: Output<OUTPUT, OUTPUT_PARTIAL>;
2834
+ /**
2835
+ * @deprecated Use `prepareStep` instead.
2836
+ */
2837
+ experimental_prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
2793
2838
  /**
2794
2839
  Optional function that you can use to provide different settings for a step.
2795
-
2796
- @param options - The options for the step.
2797
- @param options.steps - The steps that have been executed so far.
2798
- @param options.stepNumber - The number of the step that is being executed.
2799
- @param options.model - The model that is being used.
2800
-
2801
- @returns An object that contains the settings for the step.
2802
- If you return undefined (or for undefined settings), the settings from the outer level will be used.
2803
2840
  */
2804
- experimental_prepareStep?: (options: {
2805
- steps: Array<StepResult<NoInfer<TOOLS>>>;
2806
- stepNumber: number;
2807
- model: LanguageModel;
2808
- }) => PromiseLike<{
2809
- model?: LanguageModel;
2810
- toolChoice?: ToolChoice<NoInfer<TOOLS>>;
2811
- experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
2812
- } | undefined>;
2841
+ prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
2813
2842
  /**
2814
2843
  A function that attempts to repair a tool call that failed to parse.
2815
2844
  */
@@ -2883,7 +2912,7 @@ type UIMessageStreamOptions = {
2883
2912
  * that send additional data.
2884
2913
  * Default to true.
2885
2914
  */
2886
- experimental_sendFinish?: boolean;
2915
+ sendFinish?: boolean;
2887
2916
  /**
2888
2917
  * Send the message start event to the client.
2889
2918
  * Set to false if you are using additional streamText calls
@@ -2895,7 +2924,7 @@ type UIMessageStreamOptions = {
2895
2924
  * streamText calls that send additional data to prevent
2896
2925
  * the message start event from being sent multiple times.
2897
2926
  */
2898
- experimental_sendStart?: boolean;
2927
+ sendStart?: boolean;
2899
2928
  /**
2900
2929
  * Process an error, e.g. to log it. Default to `() => 'An error occurred.'`.
2901
2930
  *
@@ -3247,7 +3276,7 @@ If set and supported by the model, calls will generate deterministic results.
3247
3276
  @return
3248
3277
  A result object for accessing different stream types and additional information.
3249
3278
  */
3250
- declare function streamText<TOOLS extends ToolSet, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxSteps, experimental_output: output, experimental_telemetry: telemetry, providerOptions, experimental_toolCallStreaming, toolCallStreaming, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, onChunk, onError, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
3279
+ declare function streamText<TOOLS extends ToolSet, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, stopWhen, experimental_output: output, experimental_telemetry: telemetry, prepareStep, providerOptions, experimental_toolCallStreaming, toolCallStreaming, experimental_activeTools, activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, onChunk, onError, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
3251
3280
  /**
3252
3281
  The language model to use.
3253
3282
  */
@@ -3261,13 +3290,12 @@ The tool choice strategy. Default: 'auto'.
3261
3290
  */
3262
3291
  toolChoice?: ToolChoice<TOOLS>;
3263
3292
  /**
3264
- Maximum number of sequential LLM calls (steps), e.g. when you use tool calls. Must be at least 1.
3293
+ Condition for stopping the generation when there are tool results in the last step.
3294
+ When the condition is an array, any of the conditions can be met to stop the generation.
3265
3295
 
3266
- A maximum number is required to prevent infinite loops in the case of misconfigured tools.
3267
-
3268
- By default, it's set to 1, which means that only a single LLM call is made.
3269
- */
3270
- maxSteps?: number;
3296
+ @default stepCountIs(1)
3297
+ */
3298
+ stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
3271
3299
  /**
3272
3300
  Optional telemetry configuration (experimental).
3273
3301
  */
@@ -3279,14 +3307,30 @@ functionality that can be fully encapsulated in the provider.
3279
3307
  */
3280
3308
  providerOptions?: ProviderOptions;
3281
3309
  /**
3282
- Limits the tools that are available for the model to call without
3283
- changing the tool call and result types in the result.
3310
+ * @deprecated Use `activeTools` instead.
3284
3311
  */
3285
- experimental_activeTools?: Array<keyof TOOLS>;
3312
+ experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
3313
+ /**
3314
+ Limits the tools that are available for the model to call without
3315
+ changing the tool call and result types in the result.
3316
+ */
3317
+ activeTools?: Array<keyof NoInfer<TOOLS>>;
3286
3318
  /**
3287
3319
  Optional specification for parsing structured outputs from the LLM response.
3288
3320
  */
3289
- experimental_output?: Output$1<OUTPUT, PARTIAL_OUTPUT>;
3321
+ experimental_output?: Output<OUTPUT, PARTIAL_OUTPUT>;
3322
+ /**
3323
+ Optional function that you can use to provide different settings for a step.
3324
+
3325
+ @param options - The options for the step.
3326
+ @param options.steps - The steps that have been executed so far.
3327
+ @param options.stepNumber - The number of the step that is being executed.
3328
+ @param options.model - The model that is being used.
3329
+
3330
+ @returns An object that contains the settings for the step.
3331
+ If you return undefined (or for undefined settings), the settings from the outer level will be used.
3332
+ */
3333
+ prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
3290
3334
  /**
3291
3335
  A function that attempts to repair a tool call that failed to parse.
3292
3336
  */
@@ -3563,14 +3607,14 @@ functionality that can be fully encapsulated in the provider.
3563
3607
  @returns
3564
3608
  A result object that contains the generated object, the finish reason, the token usage, and additional information.
3565
3609
  */
3566
- declare function generateObject<RESULT extends SCHEMA extends z.Schema ? Output extends 'array' ? Array<z.infer<SCHEMA>> : z.infer<SCHEMA> : SCHEMA extends Schema<infer T> ? Output extends 'array' ? Array<T> : T : never, SCHEMA extends z.Schema | Schema = z.Schema<JSONValue$1>, Output extends 'object' | 'array' | 'enum' | 'no-schema' = RESULT extends string ? 'enum' : 'object'>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (Output extends 'enum' ? {
3610
+ declare function generateObject<SCHEMA extends z3.Schema | z4.$ZodType | Schema = z4.$ZodType<JSONValue$1>, OUTPUT extends 'object' | 'array' | 'enum' | 'no-schema' = InferSchema<SCHEMA> extends string ? 'enum' : 'object', RESULT = OUTPUT extends 'array' ? Array<InferSchema<SCHEMA>> : InferSchema<SCHEMA>>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (OUTPUT extends 'enum' ? {
3567
3611
  /**
3568
3612
  The enum values that the model should use.
3569
3613
  */
3570
3614
  enum: Array<RESULT>;
3571
3615
  mode?: 'json';
3572
3616
  output: 'enum';
3573
- } : Output extends 'no-schema' ? {} : {
3617
+ } : OUTPUT extends 'no-schema' ? {} : {
3574
3618
  /**
3575
3619
  The schema of the object that the model should generate.
3576
3620
  */
@@ -3602,7 +3646,7 @@ Default and recommended: 'auto' (best mode for the model).
3602
3646
  */
3603
3647
  mode?: 'auto' | 'json' | 'tool';
3604
3648
  }) & {
3605
- output?: Output;
3649
+ output?: OUTPUT;
3606
3650
  /**
3607
3651
  The language model to use.
3608
3652
  */
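`generateObject` (and `streamObject`, further down) now derive the result type from Zod v3, Zod v4, or `Schema` inputs via `InferSchema`, instead of being limited to `z.Schema`. A sketch with a Zod v4 schema; the model id and schema are illustrative:

```ts
import { generateObject } from 'ai';
import * as z4 from 'zod/v4'; // assumes zod >= 3.25

async function run() {
  const { object } = await generateObject({
    model: 'openai/gpt-4o-mini',
    schema: z4.object({
      name: z4.string(),
      ingredients: z4.array(z4.string()),
    }),
    prompt: 'Generate a simple pancake recipe.',
  });
  // `object` is inferred as { name: string; ingredients: string[] }
  console.log(object.ingredients.length);
}
```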
@@ -3631,6 +3675,20 @@ functionality that can be fully encapsulated in the provider.
3631
3675
  };
3632
3676
  }): Promise<GenerateObjectResult<RESULT>>;
3633
3677
 
3678
+ /**
3679
+ * Calculates the cosine similarity between two vectors. This is a useful metric for
3680
+ * comparing the similarity of two vectors such as embeddings.
3681
+ *
3682
+ * @param vector1 - The first vector.
3683
+ * @param vector2 - The second vector.
3684
+ *
3685
+ * @returns The cosine similarity between vector1 and vector2.
3686
+ * @returns 0 if either vector is the zero vector.
3687
+ *
3688
+ * @throws {InvalidArgumentError} If the vectors do not have the same length.
3689
+ */
3690
+ declare function cosineSimilarity(vector1: number[], vector2: number[]): number;
3691
+
3634
3692
  /**
3635
3693
  * Converts a data URL of type text/* to a text string.
3636
3694
  */
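`cosineSimilarity` is unchanged by this release; only its position in the declaration file moved. For reference:

```ts
import { cosineSimilarity } from 'ai';

// Vectors must have equal length (otherwise InvalidArgumentError is thrown);
// a zero vector yields 0.
const similarity = cosineSimilarity([1, 0, 1], [0, 1, 1]); // 0.5
```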
@@ -3650,19 +3708,14 @@ declare function parsePartialJson(jsonText: string | undefined): Promise<{
3650
3708
  state: 'undefined-input' | 'successful-parse' | 'repaired-parse' | 'failed-parse';
3651
3709
  }>;
3652
3710
 
3653
- /**
3654
- * Calculates the cosine similarity between two vectors. This is a useful metric for
3655
- * comparing the similarity of two vectors such as embeddings.
3656
- *
3657
- * @param vector1 - The first vector.
3658
- * @param vector2 - The second vector.
3659
- *
3660
- * @returns The cosine similarity between vector1 and vector2.
3661
- * @returns 0 if either vector is the zero vector.
3662
- *
3663
- * @throws {InvalidArgumentError} If the vectors do not have the same length.
3664
- */
3665
- declare function cosineSimilarity(vector1: number[], vector2: number[]): number;
3711
+ type Job = () => Promise<void>;
3712
+
3713
+ declare class SerialJobExecutor {
3714
+ private queue;
3715
+ private isProcessing;
3716
+ private processQueue;
3717
+ run(job: Job): Promise<void>;
3718
+ }
3666
3719
 
3667
3720
  /**
3668
3721
  * Creates a ReadableStream that emits the provided values with an optional delay between each value.
@@ -3871,14 +3924,14 @@ functionality that can be fully encapsulated in the provider.
3871
3924
  @returns
3872
3925
  A result object for accessing the partial object stream and additional information.
3873
3926
  */
3874
- declare function streamObject<RESULT extends SCHEMA extends z.Schema ? Output extends 'array' ? Array<z.infer<SCHEMA>> : z.infer<SCHEMA> : SCHEMA extends Schema<infer T> ? Output extends 'array' ? Array<T> : T : never, SCHEMA extends z.Schema | Schema = z.Schema<JSONValue$1>, Output extends 'object' | 'array' | 'enum' | 'no-schema' = RESULT extends string ? 'enum' : 'object'>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (Output extends 'enum' ? {
3927
+ declare function streamObject<SCHEMA extends z3.Schema | z4.$ZodType | Schema = z4.$ZodType<JSONValue$1>, OUTPUT extends 'object' | 'array' | 'enum' | 'no-schema' = InferSchema<SCHEMA> extends string ? 'enum' : 'object', RESULT = OUTPUT extends 'array' ? Array<InferSchema<SCHEMA>> : InferSchema<SCHEMA>>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (OUTPUT extends 'enum' ? {
3875
3928
  /**
3876
3929
  The enum values that the model should use.
3877
3930
  */
3878
3931
  enum: Array<RESULT>;
3879
3932
  mode?: 'json';
3880
3933
  output: 'enum';
3881
- } : Output extends 'no-schema' ? {} : {
3934
+ } : OUTPUT extends 'no-schema' ? {} : {
3882
3935
  /**
3883
3936
  The schema of the object that the model should generate.
3884
3937
  */
@@ -3910,7 +3963,7 @@ Default and recommended: 'auto' (best mode for the model).
3910
3963
  */
3911
3964
  mode?: 'auto' | 'json' | 'tool';
3912
3965
  }) & {
3913
- output?: Output;
3966
+ output?: OUTPUT;
3914
3967
  /**
3915
3968
  The language model to use.
3916
3969
  */
@@ -3943,7 +3996,7 @@ Callback that is called when the LLM response and the final object validation ar
3943
3996
  currentDate?: () => Date;
3944
3997
  now?: () => number;
3945
3998
  };
3946
- }): StreamObjectResult<Output extends 'enum' ? string : Output extends 'array' ? RESULT : DeepPartial<RESULT>, Output extends 'array' ? RESULT : RESULT, Output extends 'array' ? RESULT extends Array<infer U> ? AsyncIterableStream<U> : never : never>;
3999
+ }): StreamObjectResult<OUTPUT extends 'enum' ? string : OUTPUT extends 'array' ? RESULT : DeepPartial<RESULT>, OUTPUT extends 'array' ? RESULT : RESULT, OUTPUT extends 'array' ? RESULT extends Array<infer U> ? AsyncIterableStream<U> : never : never>;
3947
4000
 
3948
4001
  /**
3949
4002
  * A generated audio file.
@@ -4121,15 +4174,15 @@ declare const wrapLanguageModel: ({ model, middleware: middlewareArg, modelId, p
4121
4174
  *
4122
4175
  * @throws {NoSuchModelError} Throws when a requested model is not found and no fallback provider is available.
4123
4176
  */
4124
- declare function customProvider<LANGUAGE_MODELS extends Record<string, LanguageModel>, EMBEDDING_MODELS extends Record<string, EmbeddingModel<string>>, IMAGE_MODELS extends Record<string, ImageModel>>({ languageModels, textEmbeddingModels, imageModels, fallbackProvider, }: {
4177
+ declare function customProvider<LANGUAGE_MODELS extends Record<string, LanguageModelV2>, EMBEDDING_MODELS extends Record<string, EmbeddingModelV2<string>>, IMAGE_MODELS extends Record<string, ImageModelV2>>({ languageModels, textEmbeddingModels, imageModels, fallbackProvider, }: {
4125
4178
  languageModels?: LANGUAGE_MODELS;
4126
4179
  textEmbeddingModels?: EMBEDDING_MODELS;
4127
4180
  imageModels?: IMAGE_MODELS;
4128
4181
  fallbackProvider?: ProviderV2;
4129
- }): Provider & {
4130
- languageModel(modelId: ExtractModelId<LANGUAGE_MODELS>): LanguageModel;
4131
- textEmbeddingModel(modelId: ExtractModelId<EMBEDDING_MODELS>): EmbeddingModel<string>;
4132
- imageModel(modelId: ExtractModelId<IMAGE_MODELS>): ImageModel;
4182
+ }): ProviderV2 & {
4183
+ languageModel(modelId: ExtractModelId<LANGUAGE_MODELS>): LanguageModelV2;
4184
+ textEmbeddingModel(modelId: ExtractModelId<EMBEDDING_MODELS>): EmbeddingModelV2<string>;
4185
+ imageModel(modelId: ExtractModelId<IMAGE_MODELS>): ImageModelV2;
4133
4186
  };
4134
4187
  /**
4135
4188
  * @deprecated Use `customProvider` instead.
@@ -4154,12 +4207,12 @@ declare class NoSuchProviderError extends NoSuchModelError {
4154
4207
 
4155
4208
  type ExtractLiteralUnion<T> = T extends string ? string extends T ? never : T : never;
4156
4209
  interface ProviderRegistryProvider<PROVIDERS extends Record<string, ProviderV2> = Record<string, ProviderV2>, SEPARATOR extends string = ':'> {
4157
- languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['languageModel']>>[0]>}` : never): LanguageModel;
4158
- languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): LanguageModel;
4159
- textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['textEmbeddingModel']>>[0]>}` : never): EmbeddingModel<string>;
4160
- textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): EmbeddingModel<string>;
4161
- imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['imageModel']>>[0]>}` : never): ImageModel;
4162
- imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): ImageModel;
4210
+ languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['languageModel']>>[0]>}` : never): LanguageModelV2;
4211
+ languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): LanguageModelV2;
4212
+ textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['textEmbeddingModel']>>[0]>}` : never): EmbeddingModelV2<string>;
4213
+ textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): EmbeddingModelV2<string>;
4214
+ imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['imageModel']>>[0]>}` : never): ImageModelV2;
4215
+ imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): ImageModelV2;
4163
4216
  }
4164
4217
  /**
4165
4218
  * Creates a registry for the given providers.
@@ -4311,9 +4364,8 @@ type UIMessageStreamPart = {
4311
4364
  text: string;
4312
4365
  providerMetadata?: ProviderMetadata;
4313
4366
  } | {
4314
- type: 'source';
4315
- sourceType: 'url';
4316
- id: string;
4367
+ type: 'source-url';
4368
+ sourceId: string;
4317
4369
  url: string;
4318
4370
  title?: string;
4319
4371
  providerMetadata?: ProviderMetadata;
@@ -4341,15 +4393,6 @@ type UIMessageStreamPart = {
4341
4393
  type: 'reasoning-part-finish';
4342
4394
  };
4343
4395
 
4344
- type Job = () => Promise<void>;
4345
-
4346
- declare class SerialJobExecutor {
4347
- private queue;
4348
- private isProcessing;
4349
- private processQueue;
4350
- run(job: Job): Promise<void>;
4351
- }
4352
-
4353
4396
  interface UIMessageStreamWriter {
4354
4397
  /**
4355
4398
  * Appends a data stream part to the stream.
@@ -4367,9 +4410,32 @@ interface UIMessageStreamWriter {
4367
4410
  onError: ((error: unknown) => string) | undefined;
4368
4411
  }
4369
4412
 
4370
- declare function createUIMessageStream({ execute, onError, }: {
4371
- execute: (writer: UIMessageStreamWriter) => Promise<void> | void;
4413
+ declare function createUIMessageStream({ execute, onError, // mask error messages for safety by default
4414
+ originalMessages, onFinish, }: {
4415
+ execute: (options: {
4416
+ writer: UIMessageStreamWriter;
4417
+ }) => Promise<void> | void;
4372
4418
  onError?: (error: unknown) => string;
4419
+ /**
4420
+ * The original messages.
4421
+ */
4422
+ originalMessages?: UIMessage[];
4423
+ onFinish?: (options: {
4424
+ /**
4425
+ * The updates list of UI messages.
4426
+ */
4427
+ messages: UIMessage[];
4428
+ /**
4429
+ * Indicates whether the response message is a continuation of the last original message,
4430
+ * or if a new message was created.
4431
+ */
4432
+ isContinuation: boolean;
4433
+ /**
4434
+ * The message that was sent to the client as a response
4435
+ * (including the original message if it was extended).
4436
+ */
4437
+ responseMessage: UIMessage;
4438
+ }) => void;
4373
4439
  }): ReadableStream<UIMessageStreamPart>;
4374
4440
 
4375
4441
  declare function createUIMessageStreamResponse({ status, statusText, headers, stream, }: ResponseInit & {
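`createUIMessageStream` now passes the writer inside an options object and adds `originalMessages`/`onFinish` for persisting the updated message list. A sketch; the written part uses the `source-url` shape from this diff, while the `writer.write` method name is an assumption because the full `UIMessageStreamWriter` interface is not shown here:

```ts
import { createUIMessageStream, createUIMessageStreamResponse } from 'ai';

const stream = createUIMessageStream({
  // was: execute: (writer) => { ... }
  execute: async ({ writer }) => {
    writer.write({
      type: 'source-url',
      sourceId: 'src-1',
      url: 'https://example.com/doc',
      title: 'Example source',
    });
  },
  originalMessages: [], // messages the response should extend, if any
  onFinish: ({ messages, isContinuation, responseMessage }) => {
    // Persist `messages` (the updated UI message list) here.
    console.log(isContinuation, responseMessage.id, messages.length);
  },
});

const response = createUIMessageStreamResponse({ stream });
```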
@@ -4389,22 +4455,186 @@ interface ChatTransport<MESSAGE_METADATA, DATA_TYPES extends UIDataTypes> {
4389
4455
  submitMessages: (options: {
4390
4456
  chatId: string;
4391
4457
  messages: UIMessage<MESSAGE_METADATA, DATA_TYPES>[];
4392
- abortController: AbortController;
4393
- body?: object;
4394
- headers?: Record<string, string> | Headers;
4458
+ abortSignal: AbortSignal | undefined;
4395
4459
  requestType: 'generate' | 'resume';
4396
- }) => Promise<ReadableStream<UIMessageStreamPart>>;
4460
+ } & ChatRequestOptions) => Promise<ReadableStream<UIMessageStreamPart>>;
4461
+ }
4462
+
4463
+ type ChatRequestOptions = {
4464
+ /**
4465
+ Additional headers that should be to be passed to the API endpoint.
4466
+ */
4467
+ headers?: Record<string, string> | Headers;
4468
+ /**
4469
+ Additional body JSON properties that should be sent to the API endpoint.
4470
+ */
4471
+ body?: object;
4472
+ metadata?: unknown;
4473
+ };
4474
+ interface ChatSubscriber {
4475
+ onChange: (event: ChatEvent) => void;
4476
+ }
4477
+ interface ChatEvent {
4478
+ type: 'messages-changed' | 'status-changed';
4479
+ }
4480
+ type ChatStatus = 'submitted' | 'streaming' | 'ready' | 'error';
4481
+ interface ChatState<MESSAGE_METADATA, DATA_TYPES extends UIDataTypes> {
4482
+ status: ChatStatus;
4483
+ error: Error | undefined;
4484
+ messages: UIMessage<MESSAGE_METADATA, DATA_TYPES>[];
4485
+ pushMessage: (message: UIMessage<MESSAGE_METADATA, DATA_TYPES>) => void;
4486
+ popMessage: () => void;
4487
+ replaceMessage: (index: number, message: UIMessage<MESSAGE_METADATA, DATA_TYPES>) => void;
4488
+ snapshot: <T>(thing: T) => T;
4489
+ }
4490
+ interface ChatInit<MESSAGE_METADATA = unknown, UI_DATA_PART_SCHEMAS extends UIDataPartSchemas = UIDataPartSchemas> {
4491
+ /**
4492
+ * A unique identifier for the chat. If not provided, a random one will be
4493
+ * generated.
4494
+ */
4495
+ id?: string;
4496
+ messageMetadataSchema?: Validator<MESSAGE_METADATA> | StandardSchemaV1<MESSAGE_METADATA>;
4497
+ dataPartSchemas?: UI_DATA_PART_SCHEMAS;
4498
+ messages?: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[];
4499
+ /**
4500
+ * A way to provide a function that is going to be used for ids for messages and the chat.
4501
+ * If not provided the default AI SDK `generateId` is used.
4502
+ */
4503
+ generateId?: IdGenerator;
4504
+ transport?: ChatTransport<NoInfer<MESSAGE_METADATA>, NoInfer<InferUIDataParts<UI_DATA_PART_SCHEMAS>>>;
4505
+ maxSteps?: number;
4506
+ /**
4507
+ * Callback function to be called when an error is encountered.
4508
+ */
4509
+ onError?: (error: Error) => void;
4510
+ /**
4511
+ Optional callback function that is invoked when a tool call is received.
4512
+ Intended for automatic client-side tool execution.
4513
+
4514
+ You can optionally return a result for the tool call,
4515
+ either synchronously or asynchronously.
4516
+ */
4517
+ onToolCall?: ({ toolCall, }: {
4518
+ toolCall: ToolCall<string, unknown>;
4519
+ }) => void | Promise<unknown> | unknown;
4520
+ /**
4521
+ * Optional callback function that is called when the assistant message is finished streaming.
4522
+ *
4523
+ * @param message The message that was streamed.
4524
+ */
4525
+ onFinish?: (options: {
4526
+ message: UIMessage<NoInfer<MESSAGE_METADATA>, NoInfer<InferUIDataParts<UI_DATA_PART_SCHEMAS>>>;
4527
+ }) => void;
4528
+ }
4529
+ declare abstract class AbstractChat<MESSAGE_METADATA = unknown, UI_DATA_PART_SCHEMAS extends UIDataPartSchemas = UIDataPartSchemas> {
4530
+ readonly id: string;
4531
+ readonly generateId: IdGenerator;
4532
+ protected state: ChatState<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>;
4533
+ private readonly subscribers;
4534
+ private messageMetadataSchema;
4535
+ private dataPartSchemas;
4536
+ private readonly transport;
4537
+ private maxSteps;
4538
+ private onError?;
4539
+ private onToolCall?;
4540
+ private onFinish?;
4541
+ private activeResponse;
4542
+ private jobExecutor;
4543
+ constructor({ generateId, id, transport, maxSteps, messageMetadataSchema, dataPartSchemas, state, onError, onToolCall, onFinish, }: Omit<ChatInit<MESSAGE_METADATA, UI_DATA_PART_SCHEMAS>, 'messages'> & {
4544
+ state: ChatState<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>;
4545
+ });
4546
+ /**
4547
+ * Hook status:
4548
+ *
4549
+ * - `submitted`: The message has been sent to the API and we're awaiting the start of the response stream.
4550
+ * - `streaming`: The response is actively streaming in from the API, receiving chunks of data.
4551
+ * - `ready`: The full response has been received and processed; a new user message can be submitted.
4552
+ * - `error`: An error occurred during the API request, preventing successful completion.
4553
+ */
4554
+ get status(): ChatStatus;
4555
+ protected setStatus({ status, error, }: {
4556
+ status: ChatStatus;
4557
+ error?: Error;
4558
+ }): void;
4559
+ get error(): Error | undefined;
4560
+ get messages(): UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[];
4561
+ get lastMessage(): UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>> | undefined;
4562
+ subscribe(subscriber: ChatSubscriber): () => void;
4563
+ set messages(messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[]);
4564
+ removeAssistantResponse: () => void;
4565
+ /**
4566
+ * Append a user message to the chat list. This triggers the API call to fetch
4567
+ * the assistant's response.
4568
+ */
4569
+ sendMessage: (message: (CreateUIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>> & {
4570
+ text?: never;
4571
+ files?: never;
4572
+ }) | {
4573
+ text: string;
4574
+ files?: FileList | FileUIPart[];
4575
+ metadata?: MESSAGE_METADATA;
4576
+ parts?: never;
4577
+ } | {
4578
+ files: FileList | FileUIPart[];
4579
+ metadata?: MESSAGE_METADATA;
4580
+ parts?: never;
4581
+ }, options?: ChatRequestOptions) => Promise<void>;
4582
+ /**
4583
+ * Regenerate the last assistant message.
4584
+ */
4585
+ reload: (options?: ChatRequestOptions) => Promise<void>;
4586
+ /**
4587
+ * Resume an ongoing chat generation stream. This does not resume an aborted generation.
4588
+ */
4589
+ experimental_resume: (options?: ChatRequestOptions) => Promise<void>;
4590
+ addToolResult: ({ toolCallId, result, }: {
4591
+ toolCallId: string;
4592
+ result: unknown;
4593
+ }) => Promise<void>;
4594
+ /**
4595
+ * Abort the current request immediately, keep the generated tokens if any.
4596
+ */
4597
+ stop: () => Promise<void>;
4598
+ private emit;
4599
+ private triggerRequest;
4397
4600
  }
4601
+
4602
+ declare function convertFileListToFileUIParts(files: FileList | undefined): Promise<Array<FileUIPart>>;
4603
+
4604
+ /**
4605
+ Converts an array of messages from useChat into an array of CoreMessages that can be used
4606
+ with the AI core functions (e.g. `streamText`).
4607
+ */
4608
+ declare function convertToModelMessages<TOOLS extends ToolSet = never>(messages: Array<Omit<UIMessage, 'id'>>, options?: {
4609
+ tools?: TOOLS;
4610
+ }): ModelMessage[];
4611
+ /**
4612
+ @deprecated Use `convertToModelMessages` instead.
4613
+ */
4614
+ declare const convertToCoreMessages: typeof convertToModelMessages;
4615
+
4616
+ type PrepareRequest<MESSAGE_METADATA, DATA_TYPES extends UIDataTypes> = (options: {
4617
+ id: string;
4618
+ messages: UIMessage<MESSAGE_METADATA, DATA_TYPES>[];
4619
+ requestMetadata: unknown;
4620
+ body: Record<string, any> | undefined;
4621
+ credentials: RequestCredentials | undefined;
4622
+ headers: HeadersInit | undefined;
4623
+ }) => {
4624
+ body: object;
4625
+ headers?: HeadersInit;
4626
+ credentials?: RequestCredentials;
4627
+ };
4628
+
4398
4629
  declare class DefaultChatTransport<MESSAGE_METADATA, DATA_TYPES extends UIDataTypes> implements ChatTransport<MESSAGE_METADATA, DATA_TYPES> {
4399
4630
  private api;
4400
4631
  private credentials?;
4401
4632
  private headers?;
4402
4633
  private body?;
4403
- private streamProtocol?;
4404
4634
  private fetch?;
4405
- private prepareRequestBody?;
4406
- constructor({ api, credentials, headers, body, streamProtocol, fetch, prepareRequestBody, }: {
4407
- api: string;
4635
+ private prepareRequest?;
4636
+ constructor({ api, credentials, headers, body, fetch, prepareRequest, }?: {
4637
+ api?: string;
4408
4638
  /**
4409
4639
  * The credentials mode to be used for the fetch request.
4410
4640
  * Possible values are: 'omit', 'same-origin', 'include'.
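The `ChatStore`-centric client API (removed further down) is replaced by a single `AbstractChat` class that owns `sendMessage`, `reload`, `addToolResult`, `experimental_resume`, and `stop`, with per-call `ChatRequestOptions`. A sketch of the call shapes, assuming `AbstractChat` is exported from the package root and that a concrete subclass is provided elsewhere (e.g. by a UI framework package; that wiring is not part of this diff):

```ts
import type { AbstractChat } from 'ai';

async function demo(chat: AbstractChat) {
  // Text-only form; the message parts are created for you.
  await chat.sendMessage({ text: 'What is the weather in Berlin?' });

  // Provide a client-side tool result.
  await chat.addToolResult({ toolCallId: 'call-123', result: { tempC: 21 } });

  // Regenerate the last assistant message with per-request options.
  await chat.reload({ headers: { 'x-session': 'abc' }, body: { userId: '42' } });

  console.log(chat.status, chat.messages.length);
}
```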
@@ -4429,10 +4659,6 @@ declare class DefaultChatTransport<MESSAGE_METADATA, DATA_TYPES extends UIDataTy
4429
4659
  */
4430
4660
  body?: object;
4431
4661
  /**
4432
- Streaming protocol that is used. Defaults to `ui-message`.
4433
- */
4434
- streamProtocol?: 'ui-message' | 'text';
4435
- /**
4436
4662
  Custom fetch implementation. You can use it as a middleware to intercept requests,
4437
4663
  or to provide a custom fetch implementation for e.g. testing.
4438
4664
  */
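`DefaultChatTransport` drops `streamProtocol` and `prepareRequestBody` in favor of a single `prepareRequest` hook (see the `PrepareRequest` type above), and its constructor options, including `api`, are now optional. A sketch; the endpoint path and extra body fields are illustrative:

```ts
import { DefaultChatTransport } from 'ai';

const transport = new DefaultChatTransport({
  api: '/api/chat',
  // was: prepareRequestBody({ chatId, messages, requestBody })
  prepareRequest: ({ id, messages, body, requestMetadata }) => ({
    body: { ...body, chatId: id, messages, metadata: requestMetadata },
  }),
});
```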
@@ -4446,448 +4672,63 @@ declare class DefaultChatTransport<MESSAGE_METADATA, DATA_TYPES extends UIDataTy
4446
4672
  * @param messages The current messages in the chat.
4447
4673
  * @param requestBody The request body object passed in the chat request.
4448
4674
  */
4449
- prepareRequestBody?: (options: {
4450
- chatId: string;
4451
- messages: UIMessage<MESSAGE_METADATA, DATA_TYPES>[];
4452
- requestBody?: object;
4453
- }) => unknown;
4675
+ prepareRequest?: PrepareRequest<MESSAGE_METADATA, DATA_TYPES>;
4454
4676
  });
4455
- submitMessages({ chatId, messages, abortController, body, headers, requestType, }: Parameters<ChatTransport<MESSAGE_METADATA, DATA_TYPES>['submitMessages']>[0]): Promise<ReadableStream<UIMessageStreamPart>>;
4677
+ submitMessages({ chatId, messages, abortSignal, metadata, headers, body, requestType, }: Parameters<ChatTransport<MESSAGE_METADATA, DATA_TYPES>['submitMessages']>[0]): Promise<ReadableStream<UIMessageStreamPart>>;
4456
4678
  }
4457
4679
 
4458
- type StreamingUIMessageState<MESSAGE_METADATA = unknown, UI_DATA_PART_SCHEMAS extends UIDataPartSchemas = UIDataPartSchemas> = {
4459
- message: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>;
4460
- activeTextPart: TextUIPart | undefined;
4461
- activeReasoningPart: ReasoningUIPart | undefined;
4462
- partialToolCalls: Record<string, {
4463
- text: string;
4464
- step: number;
4465
- index: number;
4466
- toolName: string;
4467
- }>;
4468
- step: number;
4469
- };
4680
+ declare function getToolInvocations(message: UIMessage): ToolInvocation[];
4470
4681
 
4471
- interface ChatStoreSubscriber {
4472
- onChatChanged: (event: ChatStoreEvent) => void;
4473
- }
4474
- interface ChatStoreEvent {
4475
- type: 'chat-messages-changed' | 'chat-status-changed';
4476
- chatId: number | string;
4477
- error?: Error;
4478
- }
4479
- type ChatStatus = 'submitted' | 'streaming' | 'ready' | 'error';
4480
- interface Chat<MESSAGE_METADATA, DATA_TYPES extends UIDataTypes> {
4481
- status: ChatStatus;
4482
- messages: UIMessage<MESSAGE_METADATA, DATA_TYPES>[];
4483
- error?: Error;
4484
- activeResponse?: {
4485
- state: StreamingUIMessageState<MESSAGE_METADATA>;
4486
- abortController?: AbortController;
4487
- };
4488
- jobExecutor: SerialJobExecutor;
4489
- }
4490
- type ExtendedCallOptions<MESSAGE_METADATA, DATA_TYPES extends UIDataTypes> = ChatRequestOptions & {
4491
- onError?: (error: Error) => void;
4492
- /**
4493
- Optional callback function that is invoked when a tool call is received.
4494
- Intended for automatic client-side tool execution.
4495
-
4496
- You can optionally return a result for the tool call,
4497
- either synchronously or asynchronously.
4498
- */
4499
- onToolCall?: ({ toolCall, }: {
4500
- toolCall: ToolCall<string, unknown>;
4501
- }) => void | Promise<unknown> | unknown;
4502
- /**
4503
- * Optional callback function that is called when the assistant message is finished streaming.
4504
- *
4505
- * @param message The message that was streamed.
4506
- */
4507
- onFinish?: (options: {
4508
- message: UIMessage<MESSAGE_METADATA, DATA_TYPES>;
4509
- }) => void;
4510
- };
4511
- type UIDataPartSchemas = Record<string, Validator<any> | StandardSchemaV1<any>>;
4512
- type InferUIDataParts<T extends UIDataPartSchemas> = {
4513
- [K in keyof T]: T[K] extends Validator<infer U> ? U : T[K] extends StandardSchemaV1<infer U> ? U : unknown;
4514
- };
4515
- declare class ChatStore<MESSAGE_METADATA, UI_DATA_PART_SCHEMAS extends UIDataPartSchemas> {
4516
- private chats;
4517
- private subscribers;
4518
- private generateId;
4519
- private messageMetadataSchema;
4520
- private dataPartSchemas;
4521
- private transport;
4522
- private maxSteps;
4523
- constructor({ chats, generateId, transport, maxSteps, messageMetadataSchema, dataPartSchemas, }: {
4524
- chats?: {
4525
- [id: string]: {
4526
- messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[];
4527
- };
4528
- };
4529
- generateId?: UseChatOptions['generateId'];
4530
- transport: ChatTransport<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>;
4531
- maxSteps?: number;
4532
- messageMetadataSchema?: Validator<MESSAGE_METADATA> | StandardSchemaV1<MESSAGE_METADATA>;
4533
- dataPartSchemas?: UI_DATA_PART_SCHEMAS;
4534
- });
4535
- hasChat(id: string): boolean;
4536
- addChat(id: string, messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[]): void;
4537
- getChats(): [string, Chat<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>][];
4538
- get chatCount(): number;
4539
- getStatus(id: string): ChatStatus;
4540
- setStatus({ id, status, error, }: {
4541
- id: string;
4542
- status: Chat<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>['status'];
4543
- error?: Error;
4544
- }): void;
4545
- getError(id: string): Error | undefined;
4546
- getMessages(id: string): UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[];
4547
- getLastMessage(id: string): UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>;
4548
- subscribe(subscriber: ChatStoreSubscriber): () => void;
4549
- setMessages({ id, messages, }: {
4550
- id: string;
4551
- messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[];
4552
- }): void;
4553
- removeAssistantResponse(id: string): void;
4554
- submitMessage({ chatId, message, headers, body, onError, onToolCall, onFinish, }: ExtendedCallOptions<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>> & {
4555
- chatId: string;
4556
- message: CreateUIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>;
4557
- }): Promise<void>;
4558
- resubmitLastUserMessage({ chatId, headers, body, onError, onToolCall, onFinish, }: ExtendedCallOptions<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>> & {
4559
- chatId: string;
4560
- }): Promise<null | undefined>;
4561
- resumeStream({ chatId, headers, body, onError, onToolCall, onFinish, }: ExtendedCallOptions<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>> & {
4562
- chatId: string;
4563
- }): Promise<null | undefined>;
4564
- addToolResult({ chatId, toolCallId, result, }: {
4565
- chatId: string;
4566
- toolCallId: string;
4567
- result: unknown;
4568
- }): Promise<void>;
4569
- stopStream({ chatId }: {
4570
- chatId: string;
4571
- }): Promise<void>;
4572
- private emit;
4573
- private getChat;
4574
- private triggerRequest;
4575
- }
4576
-
4577
- type ChatRequestOptions = {
4578
- /**
4579
- Additional headers that should be to be passed to the API endpoint.
4580
- */
4581
- headers?: Record<string, string> | Headers;
4582
- /**
4583
- Additional body JSON properties that should be sent to the API endpoint.
4584
- */
4585
- body?: object;
4586
- };
4587
- type UseChatOptions<MESSAGE_METADATA = unknown, DATA_TYPE_SCHEMAS extends UIDataPartSchemas = UIDataPartSchemas> = {
4588
- /**
4589
- * A unique identifier for the chat. If not provided, a random one will be
4590
- * generated. When provided, the `useChat` hook with the same `id` will
4591
- * have shared states across components.
4592
- */
4593
- chatId?: string;
4594
- /**
4595
- * Initial input of the chat.
4596
- */
4597
- initialInput?: string;
4598
- /**
4599
- Optional callback function that is invoked when a tool call is received.
4600
- Intended for automatic client-side tool execution.
4601
-
4602
- You can optionally return a result for the tool call,
4603
- either synchronously or asynchronously.
4604
- */
4605
- onToolCall?: ({ toolCall, }: {
4606
- toolCall: ToolCall<string, unknown>;
4607
- }) => void | Promise<unknown> | unknown;
4608
- /**
4609
- * Optional callback function that is called when the assistant message is finished streaming.
4610
- *
4611
- * @param message The message that was streamed.
4612
- */
4613
- onFinish?: (options: {
4614
- message: UIMessage<MESSAGE_METADATA, InferUIDataParts<DATA_TYPE_SCHEMAS>>;
4615
- }) => void;
4616
- /**
4617
- * Callback function to be called when an error is encountered.
4618
- */
4619
- onError?: (error: Error) => void;
4620
- /**
4621
- * A way to provide a function that is going to be used for ids for messages and the chat.
4622
- * If not provided the default AI SDK `generateId` is used.
4623
- */
4624
- generateId?: IdGenerator;
4625
- /**
4626
- * Optional chat store. Default is used when not provided.
4627
- */
4628
- chatStore?: ChatStore<MESSAGE_METADATA, DATA_TYPE_SCHEMAS>;
4629
- };
4630
- type OriginalUseChatOptions<MESSAGE_METADATA = unknown> = {
4631
- /**
4632
- * Schema for the message metadata. Validates the message metadata.
4633
- * Message metadata can be undefined or must match the schema.
4634
- */
4635
- messageMetadataSchema?: Schema<MESSAGE_METADATA>;
4636
- /**
4637
- * The API endpoint that accepts a `{ messages: Message[] }` object and returns
4638
- * a stream of tokens of the AI chat response. Defaults to `/api/chat`.
4639
- */
4640
- api?: string;
4641
- /**
4642
- * A unique identifier for the chat. If not provided, a random one will be
4643
- * generated. When provided, the `useChat` hook with the same `id` will
4644
- * have shared states across components.
4645
- */
4646
- chatId?: string;
4647
- /**
4648
- * Initial messages of the chat. Useful to load an existing chat history.
4649
- */
4650
- initialMessages?: UIMessage<NoInfer<MESSAGE_METADATA>>[];
4651
- /**
4652
- * Initial input of the chat.
4653
- */
4654
- initialInput?: string;
4655
- /**
4656
- Optional callback function that is invoked when a tool call is received.
4657
- Intended for automatic client-side tool execution.
4658
-
4659
- You can optionally return a result for the tool call,
4660
- either synchronously or asynchronously.
4661
- */
4662
- onToolCall?: ({ toolCall, }: {
4663
- toolCall: ToolCall<string, unknown>;
4664
- }) => void | Promise<unknown> | unknown;
4665
- /**
4666
- * Optional callback function that is called when the assistant message is finished streaming.
4667
- *
4668
- * @param message The message that was streamed.
4669
- */
4670
- onFinish?: (options: {
4671
- message: UIMessage<NoInfer<MESSAGE_METADATA>>;
4672
- }) => void;
4673
- /**
4674
- * Callback function to be called when an error is encountered.
4675
- */
4676
- onError?: (error: Error) => void;
4677
- /**
4678
- * A way to provide a function that is going to be used for ids for messages and the chat.
4679
- * If not provided the default AI SDK `generateId` is used.
4680
- */
4681
- generateId?: IdGenerator;
4682
- /**
4683
- * The credentials mode to be used for the fetch request.
4684
- * Possible values are: 'omit', 'same-origin', 'include'.
4685
- * Defaults to 'same-origin'.
4686
- */
4687
- credentials?: RequestCredentials;
4688
- /**
4689
- * HTTP headers to be sent with the API request.
4690
- */
4691
- headers?: Record<string, string> | Headers;
4692
- /**
4693
- * Extra body object to be sent with the API request.
4694
- * @example
4695
- * Send a `sessionId` to the API along with the messages.
4696
- * ```js
4697
- * useChat({
4698
- * body: {
4699
- * sessionId: '123',
4700
- * }
4701
- * })
4702
- * ```
4703
- */
4704
- body?: object;
4705
- /**
4706
- Streaming protocol that is used. Defaults to `ui-message`.
4707
- */
4708
- streamProtocol?: 'ui-message' | 'text';
4709
- /**
4710
- Custom fetch implementation. You can use it as a middleware to intercept requests,
4711
- or to provide a custom fetch implementation for e.g. testing.
4712
- */
4713
- fetch?: FetchFunction;
4714
- /**
4715
- Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
4716
- Must be at least 1.
4717
-
4718
- A maximum number is required to prevent infinite loops in the case of misconfigured tools.
4719
-
4720
- By default, it's set to 1, which means that only a single LLM call is made.
4721
- */
4722
- maxSteps?: number;
4723
- };
4724
-
4725
- declare const getOriginalFetch$1: () => typeof fetch;
4726
- declare function callChatApi<MESSAGE_METADATA>({ api, body, streamProtocol, credentials, headers, abortController, onUpdate, onFinish, onToolCall, generateId, fetch, lastMessage, requestType, messageMetadataSchema, }: {
4727
- api: string;
4728
- body: Record<string, any>;
4729
- streamProtocol: 'ui-message' | 'text' | undefined;
4730
- credentials: RequestCredentials | undefined;
4731
- headers: HeadersInit | undefined;
4732
- abortController: (() => AbortController | null) | undefined;
4733
- onUpdate: (options: {
4734
- message: UIMessage<MESSAGE_METADATA>;
4735
- }) => void;
4736
- onFinish: UseChatOptions<MESSAGE_METADATA>['onFinish'];
4737
- onToolCall: UseChatOptions<MESSAGE_METADATA>['onToolCall'];
4738
- generateId: IdGenerator;
4739
- fetch: ReturnType<typeof getOriginalFetch$1> | undefined;
4740
- lastMessage: UIMessage<MESSAGE_METADATA> | undefined;
4741
- requestType?: 'generate' | 'resume';
4742
- messageMetadataSchema?: Schema<MESSAGE_METADATA>;
4743
- }): Promise<void>;
4744
-
4745
- declare const getOriginalFetch: () => typeof fetch;
4746
- declare function callCompletionApi({ api, prompt, credentials, headers, body, streamProtocol, setCompletion, setLoading, setError, setAbortController, onFinish, onError, fetch, }: {
4747
- api: string;
4748
- prompt: string;
4749
- credentials: RequestCredentials | undefined;
4750
- headers: HeadersInit | undefined;
4751
- body: Record<string, any>;
4752
- streamProtocol: 'data' | 'text' | undefined;
4753
- setCompletion: (completion: string) => void;
4754
- setLoading: (loading: boolean) => void;
4755
- setError: (error: Error | undefined) => void;
4756
- setAbortController: (abortController: AbortController | null) => void;
4757
- onFinish: ((prompt: string, completion: string) => void) | undefined;
4758
- onError: ((error: Error) => void) | undefined;
4759
- fetch: ReturnType<typeof getOriginalFetch> | undefined;
4760
- }): Promise<string | null | undefined>;
4761
-
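
// Editorial note: callCompletionApi wires a completion request to UI state
// setters and resolves with the final completion text. An illustrative call;
// the setter bodies are placeholders for your own state handling.
import { callCompletionApi } from 'ai';

const completion = await callCompletionApi({
  api: '/api/completion',
  prompt: 'Write a haiku about TypeScript.',
  credentials: undefined,
  headers: undefined,
  body: {},
  streamProtocol: 'data',
  setCompletion: text => { /* update completion state */ },
  setLoading: loading => { /* toggle a spinner */ },
  setError: error => { /* surface the error */ },
  setAbortController: controller => { /* keep it around for cancellation */ },
  onFinish: undefined,
  onError: undefined,
  fetch: undefined,
});
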
4762
- declare function convertFileListToFileUIParts(files: FileList | undefined): Promise<Array<FileUIPart>>;
4763
-
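
// Editorial note: convertFileListToFileUIParts turns a browser FileList into
// FileUIPart objects, presumably for attaching uploads to a UIMessage. A small
// sketch using a hypothetical file input element.
import { convertFileListToFileUIParts } from 'ai';

const input = document.querySelector('input[type="file"]') as HTMLInputElement;
const fileParts = await convertFileListToFileUIParts(input.files ?? undefined);
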
4764
- /**
4765
- Converts an array of messages from useChat into an array of CoreMessages that can be used
4766
- with the AI core functions (e.g. `streamText`).
4767
- */
4768
- declare function convertToModelMessages<TOOLS extends ToolSet = never>(messages: Array<Omit<UIMessage, 'id'>>, options?: {
4769
- tools?: TOOLS;
4770
- }): ModelMessage[];
4771
- /**
4772
- @deprecated Use `convertToModelMessages` instead.
4773
- */
4774
- declare const convertToCoreMessages: typeof convertToModelMessages;
4775
-
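
// Editorial note: a typical server-side use is mapping the UIMessages received
// from the client into ModelMessages before calling a core function; the
// deprecated convertToCoreMessages alias points at the same implementation.
// `uiMessages` is a hypothetical array received from the client.
import { convertToModelMessages, convertToCoreMessages, type ModelMessage } from 'ai';

const modelMessages: ModelMessage[] = convertToModelMessages(uiMessages);
const sameThing = convertToCoreMessages(uiMessages); // deprecated alias
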
4776
- declare function defaultChatStore<MESSAGE_METADATA, UI_DATA_PART_SCHEMAS extends UIDataPartSchemas>({ api, fetch, streamProtocol, credentials, headers, body, prepareRequestBody, generateId, dataPartSchemas, messageMetadataSchema, maxSteps, chats, }: {
4777
- /**
4778
- * Schema for the message metadata. Validates the message metadata.
4779
- * Message metadata can be undefined or must match the schema.
4780
- */
4781
- messageMetadataSchema?: Validator<MESSAGE_METADATA> | StandardSchemaV1<MESSAGE_METADATA>;
4782
- /**
4783
- * Schema for the data types. Validates the data types.
4784
- */
4785
- dataPartSchemas?: UI_DATA_PART_SCHEMAS;
4786
- /**
4787
- * The API endpoint that accepts a `{ messages: Message[] }` object and returns
4788
- * a stream of tokens of the AI chat response.
4789
- */
4790
- api: string;
4791
- /**
4792
- * A way to provide a function that is going to be used for ids for messages and the chat.
4793
- * If not provided the default AI SDK `generateId` is used.
4794
- */
4795
- generateId?: IdGenerator;
4796
- /**
4797
- * The credentials mode to be used for the fetch request.
4798
- * Possible values are: 'omit', 'same-origin', 'include'.
4799
- * Defaults to 'same-origin'.
4800
- */
4801
- credentials?: RequestCredentials;
4802
- /**
4803
- * HTTP headers to be sent with the API request.
4804
- */
4805
- headers?: Record<string, string> | Headers;
4806
- /**
4807
- * Extra body object to be sent with the API request.
4808
- * @example
4809
- * Send a `sessionId` to the API along with the messages.
4810
- * ```js
4811
- * useChat({
4812
- * body: {
4813
- * sessionId: '123',
4814
- * }
4815
- * })
4816
- * ```
4817
- */
4818
- body?: object;
4819
- /**
4820
- Streaming protocol that is used. Defaults to `ui-message`.
4682
+ declare class TextStreamChatTransport<MESSAGE_METADATA, DATA_TYPES extends UIDataTypes> implements ChatTransport<MESSAGE_METADATA, DATA_TYPES> {
4683
+ private api;
4684
+ private credentials?;
4685
+ private headers?;
4686
+ private body?;
4687
+ private fetch?;
4688
+ private prepareRequest?;
4689
+ constructor({ api, credentials, headers, body, fetch, prepareRequest, }: {
4690
+ api: string;
4691
+ /**
4692
+ * The credentials mode to be used for the fetch request.
4693
+ * Possible values are: 'omit', 'same-origin', 'include'.
4694
+ * Defaults to 'same-origin'.
4821
4695
  */
4822
- streamProtocol?: 'ui-message' | 'text';
4823
- /**
4696
+ credentials?: RequestCredentials;
4697
+ /**
4698
+ * HTTP headers to be sent with the API request.
4699
+ */
4700
+ headers?: Record<string, string> | Headers;
4701
+ /**
4702
+ * Extra body object to be sent with the API request.
4703
+ * @example
4704
+ * Send a `sessionId` to the API along with the messages.
4705
+ * ```js
4706
+ * useChat({
4707
+ * body: {
4708
+ * sessionId: '123',
4709
+ * }
4710
+ * })
4711
+ * ```
4712
+ */
4713
+ body?: object;
4714
+ /**
4824
4715
  Custom fetch implementation. You can use it as a middleware to intercept requests,
4825
4716
  or to provide a custom fetch implementation for e.g. testing.
4826
4717
  */
4827
- fetch?: FetchFunction;
4828
- /**
4829
- Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
4830
- Must be at least 1.
4831
-
4832
- A maximum number is required to prevent infinite loops in the case of misconfigured tools.
4833
-
4834
- By default, it's set to 1, which means that only a single LLM call is made.
4835
- */
4836
- maxSteps?: number;
4837
- /**
4838
- * When a function is provided, it will be used
4839
- * to prepare the request body for the chat API. This can be useful for
4840
- * customizing the request body based on the messages and data in the chat.
4841
- *
4842
- * @param chatId The id of the chat.
4843
- * @param messages The current messages in the chat.
4844
- * @param requestBody The request body object passed in the chat request.
4845
- */
4846
- prepareRequestBody?: (options: {
4847
- chatId: string;
4848
- messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[];
4849
- requestBody?: object;
4850
- }) => unknown;
4851
- chats?: {
4852
- [id: string]: {
4853
- messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[];
4854
- };
4855
- };
4856
- }): ChatStore<MESSAGE_METADATA, UI_DATA_PART_SCHEMAS>;
4857
-
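
// Editorial note: for reference, the removed defaultChatStore factory was
// configured with the options documented above; only `api` is required.
// Values here are illustrative.
import { defaultChatStore } from 'ai'; // exported in 5.0.0-alpha.1, dropped from the export list below

const store = defaultChatStore({
  api: '/api/chat',
  body: { sessionId: '123' },
  headers: { Authorization: 'Bearer <token>' },
  maxSteps: 3,
});
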
4858
- declare function extractMaxToolInvocationStep(toolInvocations: ToolInvocation[] | undefined): number | undefined;
4859
-
4860
- declare function getToolInvocations(message: UIMessage): ToolInvocation[];
4861
-
4862
- declare function shouldResubmitMessages({ originalMaxToolInvocationStep, originalMessageCount, maxSteps, messages, }: {
4863
- originalMaxToolInvocationStep: number | undefined;
4864
- originalMessageCount: number;
4865
- maxSteps: number;
4866
- messages: UIMessage[];
4867
- }): boolean;
4868
- /**
4869
- Check if the message is an assistant message with completed tool calls.
4870
- The last step of the message must have at least one tool invocation and
4871
- all tool invocations must have a result.
4872
- */
4873
- declare function isAssistantMessageWithCompletedToolCalls(message: UIMessage): message is UIMessage & {
4874
- role: 'assistant';
4875
- };
4876
-
4877
- /**
4878
- * Updates the result of a specific tool invocation in the last message of the given messages array.
4879
- *
4880
- * @param {object} params - The parameters object.
4881
- * @param {UIMessage[]} params.messages - An array of messages, from which the last one is updated.
4882
- * @param {string} params.toolCallId - The unique identifier for the tool invocation to update.
4883
- * @param {unknown} params.toolResult - The result object to attach to the tool invocation.
4884
- * @returns {void} This function does not return anything.
4885
- */
4886
- declare function updateToolCallResult({ messages, toolCallId, toolResult: result, }: {
4887
- messages: UIMessage[];
4888
- toolCallId: string;
4889
- toolResult: unknown;
4890
- }): void;
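
// Editorial note: before their removal, these helpers supported the
// client-side tool-call loop: getToolInvocations pulls the tool invocations
// out of a message, updateToolCallResult attaches a result to one of them in
// the last message, and isAssistantMessageWithCompletedToolCalls /
// shouldResubmitMessages decide whether another round trip is needed. A small
// sketch; `messages` is a hypothetical UIMessage array.
import {
  getToolInvocations,
  updateToolCallResult,
  isAssistantMessageWithCompletedToolCalls,
} from 'ai'; // exported in 5.0.0-alpha.1; most of these are dropped from the new export list below

updateToolCallResult({ messages, toolCallId: 'call_1', toolResult: { ok: true } });

const last = messages[messages.length - 1];
if (isAssistantMessageWithCompletedToolCalls(last)) {
  // every tool invocation in the last step has a result; a resubmit may be warranted
  console.log(getToolInvocations(last));
}
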
4718
+ fetch?: FetchFunction;
4719
+ /**
4720
+ * When a function is provided, it will be used
4721
+ * to prepare the request body for the chat API. This can be useful for
4722
+ * customizing the request body based on the messages and data in the chat.
4723
+ *
4724
+ * @param id The id of the chat.
4725
+ * @param messages The current messages in the chat.
4726
+ * @param requestBody The request body object passed in the chat request.
4727
+ */
4728
+ prepareRequest?: NoInfer<PrepareRequest<MESSAGE_METADATA, DATA_TYPES>>;
4729
+ });
4730
+ submitMessages({ chatId, messages, abortSignal, metadata, headers, body, requestType, }: Parameters<ChatTransport<MESSAGE_METADATA, DATA_TYPES>['submitMessages']>[0]): Promise<ReadableStream<UIMessageStreamPart>>;
4731
+ }
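
// Editorial note: the newly added TextStreamChatTransport implements
// ChatTransport, presumably over a plain-text response stream (per its name);
// its constructor takes the request options shown above and submitMessages
// returns a ReadableStream of UIMessageStreamParts. A construction sketch with
// illustrative values; how it is handed to a chat instance lives outside this file.
import { TextStreamChatTransport } from 'ai';

const transport = new TextStreamChatTransport({
  api: '/api/chat',
  credentials: 'same-origin',
  headers: { 'x-session': '123' }, // illustrative header
  body: { sessionId: '123' },
});
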
4891
4732
 
4892
4733
  type CompletionRequestOptions = {
4893
4734
  /**
@@ -4961,4 +4802,4 @@ type UseCompletionOptions = {
4961
4802
  fetch?: FetchFunction;
4962
4803
  };
4963
4804
 
4964
- export { AssistantContent, AssistantModelMessage, CallSettings, CallWarning, ChatRequestOptions, ChatStatus, ChatStore, ChatStoreEvent, ChatTransport, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataContent, DataUIPart, DeepPartial, DefaultChatTransport, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, ImagePart, InferUIDataParts as InferUIDataTypes, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MCPClientError, MCPTransport, MessageConversionError, ModelMessage, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, OriginalUseChatOptions, output as Output, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RetryError, SourceUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, SystemModelMessage, TelemetrySettings, TextPart, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolModelMessage, ToolResultPart, ToolResultUnion, ToolSet, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIDataTypes, UIDataPartSchemas as UIDataTypesSchemas, UIMessage, UIMessagePart, UIMessageStreamOptions, UIMessageStreamPart, UIMessageStreamWriter, UseChatOptions, UseCompletionOptions, UserContent, UserModelMessage, appendClientMessage, assistantModelMessageSchema, callChatApi, callCompletionApi, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultChatStore, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractMaxToolInvocationStep, extractReasoningMiddleware, generateObject, generateText, getTextFromDataUrl, getToolInvocations, hasToolCall, isAssistantMessageWithCompletedToolCalls, isDeepEqualData, maxSteps, modelMessageSchema, parsePartialJson, pipeTextStreamToResponse, pipeUIMessageStreamToResponse, shouldResubmitMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, systemModelMessageSchema, tool, toolModelMessageSchema, updateToolCallResult, userModelMessageSchema, wrapLanguageModel };
4805
+ export { AbstractChat, AssistantContent, AssistantModelMessage, CallSettings, CallWarning, ChatEvent, ChatInit, ChatRequestOptions, ChatState, ChatStatus, ChatTransport, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataContent, DataUIPart, DeepPartial, DefaultChatTransport, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GLOBAL_DEFAULT_PROVIDER, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, ImagePart, InferUIDataParts, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MCPClientError, MCPTransport, MessageConversionError, ModelMessage, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, PrepareStepFunction, PrepareStepResult, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RetryError, SerialJobExecutor, SourceUrlUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, SystemModelMessage, TelemetrySettings, TextPart, TextStreamChatTransport, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolModelMessage, ToolResultPart, ToolResultUnion, ToolSet, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIDataPartSchemas, UIDataTypes, UIMessage, UIMessagePart, UIMessageStreamOptions, UIMessageStreamPart, UIMessageStreamWriter, UseCompletionOptions, UserContent, UserModelMessage, assistantModelMessageSchema, callCompletionApi, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractReasoningMiddleware, generateObject, generateText, getTextFromDataUrl, getToolInvocations, hasToolCall, isDeepEqualData, modelMessageSchema, parsePartialJson, pipeTextStreamToResponse, pipeUIMessageStreamToResponse, simulateReadableStream, simulateStreamingMiddleware, smoothStream, stepCountIs, streamObject, streamText, systemModelMessageSchema, tool, toolModelMessageSchema, userModelMessageSchema, wrapLanguageModel };