ai 5.0.0-alpha.3 → 5.0.0-alpha.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +32 -0
- package/dist/index.d.mts +257 -147
- package/dist/index.d.ts +257 -147
- package/dist/index.js +330 -255
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +327 -253
- package/dist/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/index.d.ts
CHANGED
@@ -2693,10 +2693,35 @@ declare namespace output {
     };
 }
 
+/**
+Function that you can use to provide different settings for a step.
+
+@param options - The options for the step.
+@param options.steps - The steps that have been executed so far.
+@param options.stepNumber - The number of the step that is being executed.
+@param options.model - The model that is being used.
+
+@returns An object that contains the settings for the step.
+If you return undefined (or for undefined settings), the settings from the outer level will be used.
+*/
+type PrepareStepFunction<TOOLS extends Record<string, Tool> = Record<string, Tool>> = (options: {
+    steps: Array<StepResult<NoInfer<TOOLS>>>;
+    stepNumber: number;
+    model: LanguageModel;
+}) => PromiseLike<{
+    model?: LanguageModel;
+    toolChoice?: ToolChoice<NoInfer<TOOLS>>;
+    activeTools?: Array<keyof NoInfer<TOOLS>>;
+} | undefined> | {
+    model?: LanguageModel;
+    toolChoice?: ToolChoice<NoInfer<TOOLS>>;
+    activeTools?: Array<keyof NoInfer<TOOLS>>;
+} | undefined;
+
 type StopCondition<TOOLS extends ToolSet> = (options: {
     steps: Array<StepResult<TOOLS>>;
 }) => PromiseLike<boolean> | boolean;
-declare function
+declare function stepCountIs(stepCount: number): StopCondition<any>;
 declare function hasToolCall(toolName: string): StopCondition<any>;
 
 /**
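The hunk above introduces `PrepareStepFunction` and the `stepCountIs`/`hasToolCall` stop-condition helpers. A minimal sketch of how they compose; the `finalAnswer` tool name is a placeholder, not part of the package:

```ts
import {
  hasToolCall,
  stepCountIs,
  type PrepareStepFunction,
  type StopCondition,
} from 'ai';

// Stop after at most 5 steps, or as soon as the hypothetical `finalAnswer` tool
// has been called; when an array is passed, any condition may end the loop.
const stopConditions: Array<StopCondition<any>> = [
  stepCountIs(5),
  hasToolCall('finalAnswer'), // placeholder tool name
];

// Per-step overrides: returning undefined keeps the settings of the outer call.
const prepareStep: PrepareStepFunction = async ({ stepNumber, model }) => {
  if (stepNumber === 0) {
    return undefined; // first step uses the outer settings unchanged
  }
  return {
    model, // a different LanguageModel could be returned here
    activeTools: ['finalAnswer'], // restrict later steps to the placeholder tool
  };
};
```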
@@ -2751,7 +2776,7 @@ If set and supported by the model, calls will generate deterministic results.
 @returns
 A result object that contains the generated text, the results of the tool calls, and additional information.
      */
-declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers,
+declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, stopWhen, experimental_output: output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools, activeTools, experimental_prepareStep, prepareStep, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
     /**
 The language model to use.
      */
@@ -2764,7 +2789,13 @@ The tools that the model can call. The model needs to support calling tools.
 The tool choice strategy. Default: 'auto'.
      */
     toolChoice?: ToolChoice<NoInfer<TOOLS>>;
-
+    /**
+Condition for stopping the generation when there are tool results in the last step.
+When the condition is an array, any of the conditions can be met to stop the generation.
+
+@default stepCountIs(1)
+     */
+    stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
     /**
 Optional telemetry configuration (experimental).
      */
@@ -2776,34 +2807,26 @@ functionality that can be fully encapsulated in the provider.
      */
     providerOptions?: ProviderOptions;
     /**
+     * @deprecated Use `activeTools` instead.
+     */
+    experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
+    /**
 Limits the tools that are available for the model to call without
 changing the tool call and result types in the result.
      */
-
+    activeTools?: Array<keyof NoInfer<TOOLS>>;
     /**
 Optional specification for parsing structured outputs from the LLM response.
      */
     experimental_output?: Output$1<OUTPUT, OUTPUT_PARTIAL>;
     /**
+     * @deprecated Use `prepareStep` instead.
+     */
+    experimental_prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
+    /**
 Optional function that you can use to provide different settings for a step.
-
-@param options - The options for the step.
-@param options.steps - The steps that have been executed so far.
-@param options.stepNumber - The number of the step that is being executed.
-@param options.model - The model that is being used.
-
-@returns An object that contains the settings for the step.
-If you return undefined (or for undefined settings), the settings from the outer level will be used.
      */
-
-    steps: Array<StepResult<NoInfer<TOOLS>>>;
-    stepNumber: number;
-    model: LanguageModel;
-}) => PromiseLike<{
-    model?: LanguageModel;
-    toolChoice?: ToolChoice<NoInfer<TOOLS>>;
-    experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
-} | undefined>;
+    prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
     /**
 A function that attempts to repair a tool call that failed to parse.
      */
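With `stopWhen` now part of the `generateText` options, a multi-step tool loop can be sketched like this; `model`, `tools`, and the `weather` tool name are placeholders:

```ts
import { generateText, hasToolCall, stepCountIs, type LanguageModel, type ToolSet } from 'ai';

// Placeholders: bring your own model and tool definitions.
declare const model: LanguageModel;
declare const tools: ToolSet;

const result = await generateText({
  model,
  tools,
  prompt: 'What is the weather in Berlin?',
  // stop after 5 steps, or once the (placeholder) `weather` tool has been called
  stopWhen: [stepCountIs(5), hasToolCall('weather')],
});

console.log(result.text);
```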
@@ -2877,7 +2900,7 @@ type UIMessageStreamOptions = {
      * that send additional data.
      * Default to true.
      */
-
+    sendFinish?: boolean;
     /**
      * Send the message start event to the client.
      * Set to false if you are using additional streamText calls
@@ -2889,7 +2912,7 @@ type UIMessageStreamOptions = {
      * streamText calls that send additional data to prevent
      * the message start event from being sent multiple times.
      */
-
+    sendStart?: boolean;
     /**
      * Process an error, e.g. to log it. Default to `() => 'An error occurred.'`.
      *
@@ -3241,7 +3264,7 @@ If set and supported by the model, calls will generate deterministic results.
 @return
 A result object for accessing different stream types and additional information.
      */
-declare function streamText<TOOLS extends ToolSet, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers,
+declare function streamText<TOOLS extends ToolSet, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, stopWhen, experimental_output: output, experimental_telemetry: telemetry, prepareStep, providerOptions, experimental_toolCallStreaming, toolCallStreaming, experimental_activeTools, activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, onChunk, onError, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
     /**
 The language model to use.
      */
@@ -3254,7 +3277,13 @@ The tools that the model can call. The model needs to support calling tools.
 The tool choice strategy. Default: 'auto'.
      */
     toolChoice?: ToolChoice<TOOLS>;
-
+    /**
+Condition for stopping the generation when there are tool results in the last step.
+When the condition is an array, any of the conditions can be met to stop the generation.
+
+@default stepCountIs(1)
+     */
+    stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
     /**
 Optional telemetry configuration (experimental).
      */
@@ -3266,15 +3295,31 @@ functionality that can be fully encapsulated in the provider.
      */
     providerOptions?: ProviderOptions;
     /**
-
-changing the tool call and result types in the result.
+     * @deprecated Use `activeTools` instead.
      */
-    experimental_activeTools?: Array<keyof TOOLS
+    experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
+    /**
+Limits the tools that are available for the model to call without
+changing the tool call and result types in the result.
+     */
+    activeTools?: Array<keyof NoInfer<TOOLS>>;
     /**
 Optional specification for parsing structured outputs from the LLM response.
      */
     experimental_output?: Output$1<OUTPUT, PARTIAL_OUTPUT>;
     /**
+Optional function that you can use to provide different settings for a step.
+
+@param options - The options for the step.
+@param options.steps - The steps that have been executed so far.
+@param options.stepNumber - The number of the step that is being executed.
+@param options.model - The model that is being used.
+
+@returns An object that contains the settings for the step.
+If you return undefined (or for undefined settings), the settings from the outer level will be used.
+     */
+    prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
+    /**
 A function that attempts to repair a tool call that failed to parse.
      */
     experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
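A corresponding sketch for `streamText`, using the un-prefixed `activeTools` and `prepareStep` options added above; `model`, `tools`, and the `searchFlights` tool name are placeholders:

```ts
import { streamText, stepCountIs, type LanguageModel, type ToolSet } from 'ai';

// Placeholders: bring your own model and tool definitions.
declare const model: LanguageModel;
declare const tools: ToolSet;

const result = streamText({
  model,
  tools,
  prompt: 'Plan a day trip to Lisbon.',
  stopWhen: stepCountIs(3),
  // the un-prefixed options supersede experimental_activeTools / experimental_prepareStep
  activeTools: ['searchFlights'], // placeholder tool name
  prepareStep: async ({ stepNumber }) =>
    stepNumber === 0 ? { toolChoice: 'required' } : undefined,
});

for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}
```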
@@ -3618,6 +3663,20 @@ functionality that can be fully encapsulated in the provider.
     };
 }): Promise<GenerateObjectResult<RESULT>>;
 
+/**
+ * Calculates the cosine similarity between two vectors. This is a useful metric for
+ * comparing the similarity of two vectors such as embeddings.
+ *
+ * @param vector1 - The first vector.
+ * @param vector2 - The second vector.
+ *
+ * @returns The cosine similarity between vector1 and vector2.
+ * @returns 0 if either vector is the zero vector.
+ *
+ * @throws {InvalidArgumentError} If the vectors do not have the same length.
+ */
+declare function cosineSimilarity(vector1: number[], vector2: number[]): number;
+
 /**
  * Converts a data URL of type text/* to a text string.
  */
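A small usage sketch for the relocated `cosineSimilarity` helper, using shortened literal vectors in place of real embeddings:

```ts
import { cosineSimilarity } from 'ai';

// Two embedding vectors (e.g. returned by embed/embedMany) — shortened here.
const a = [0.12, 0.58, -0.33];
const b = [0.1, 0.61, -0.29];

console.log(cosineSimilarity(a, b)); // close to 1 for similar vectors
// cosineSimilarity returns 0 if either vector is the zero vector and throws
// InvalidArgumentError if the vectors differ in length.
```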
@@ -3637,19 +3696,14 @@ declare function parsePartialJson(jsonText: string | undefined): Promise<{
     state: 'undefined-input' | 'successful-parse' | 'repaired-parse' | 'failed-parse';
 }>;
 
-/**
- * Calculates the cosine similarity between two vectors. This is a useful metric for
- * comparing the similarity of two vectors such as embeddings.
- *
- * @param vector1 - The first vector.
- * @param vector2 - The second vector.
- *
- * @returns The cosine similarity between vector1 and vector2.
- * @returns 0 if either vector is the zero vector.
- *
- * @throws {InvalidArgumentError} If the vectors do not have the same length.
- */
-declare function cosineSimilarity(vector1: number[], vector2: number[]): number;
+type Job = () => Promise<void>;
+
+declare class SerialJobExecutor {
+    private queue;
+    private isProcessing;
+    private processQueue;
+    run(job: Job): Promise<void>;
+}
 
 /**
  * Creates a ReadableStream that emits the provided values with an optional delay between each value.
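`SerialJobExecutor` is now exported from this entry point. A sketch of its behavior based on the declaration above:

```ts
import { SerialJobExecutor } from 'ai';

// SerialJobExecutor runs queued async jobs strictly one after another.
const executor = new SerialJobExecutor();

const order: number[] = [];

await Promise.all([
  executor.run(async () => {
    order.push(1); // runs first
  }),
  executor.run(async () => {
    order.push(2); // queued until the first job resolves
  }),
]);

console.log(order); // [1, 2]
```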
@@ -4327,15 +4381,6 @@ type UIMessageStreamPart = {
     type: 'reasoning-part-finish';
 };
 
-type Job = () => Promise<void>;
-
-declare class SerialJobExecutor {
-    private queue;
-    private isProcessing;
-    private processQueue;
-    run(job: Job): Promise<void>;
-}
-
 interface UIMessageStreamWriter {
     /**
      * Appends a data stream part to the stream.
@@ -4353,9 +4398,32 @@ interface UIMessageStreamWriter {
     onError: ((error: unknown) => string) | undefined;
 }
 
-declare function createUIMessageStream({ execute, onError,
-
+declare function createUIMessageStream({ execute, onError, // mask error messages for safety by default
+originalMessages, onFinish, }: {
+    execute: (options: {
+        writer: UIMessageStreamWriter;
+    }) => Promise<void> | void;
     onError?: (error: unknown) => string;
+    /**
+     * The original messages.
+     */
+    originalMessages?: UIMessage[];
+    onFinish?: (options: {
+        /**
+         * The updates list of UI messages.
+         */
+        messages: UIMessage[];
+        /**
+         * Indicates whether the response message is a continuation of the last original message,
+         * or if a new message was created.
+         */
+        isContinuation: boolean;
+        /**
+         * The message that was sent to the client as a response
+         * (including the original message if it was extended).
+         */
+        responseMessage: UIMessage;
+    }) => void;
 }): ReadableStream<UIMessageStreamPart>;
 
 declare function createUIMessageStreamResponse({ status, statusText, headers, stream, }: ResponseInit & {
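A sketch of the extended `createUIMessageStream` options shown above; `previousMessages` is a placeholder for the `UIMessage[]` you already hold for the chat:

```ts
import {
  createUIMessageStream,
  createUIMessageStreamResponse,
  type UIMessage,
} from 'ai';

// Placeholder: the UI messages you already hold for this chat.
declare const previousMessages: UIMessage[];

const stream = createUIMessageStream({
  originalMessages: previousMessages,
  execute: async ({ writer }) => {
    // write UI message stream parts through `writer` here
  },
  onFinish: ({ messages, isContinuation, responseMessage }) => {
    // `messages` is the updated list, `responseMessage` what was sent to the
    // client, `isContinuation` whether the last original message was extended
  },
  onError: () => 'An error occurred.', // error messages are masked by default
});

// e.g. return the stream from a route handler:
const response = createUIMessageStreamResponse({ stream });
```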
@@ -4512,16 +4580,10 @@ interface ChatStoreEvent {
     error?: Error;
 }
 type ChatStatus = 'submitted' | 'streaming' | 'ready' | 'error';
-
-
-
-
-    activeResponse?: {
-        state: StreamingUIMessageState<MESSAGE_METADATA>;
-        abortController?: AbortController;
-    };
-    jobExecutor: SerialJobExecutor;
-}
+type ActiveResponse<MESSAGE_METADATA> = {
+    state: StreamingUIMessageState<MESSAGE_METADATA>;
+    abortController: AbortController | undefined;
+};
 type ExtendedCallOptions<MESSAGE_METADATA, DATA_TYPES extends UIDataTypes> = ChatRequestOptions & {
     onError?: (error: Error) => void;
     /**
@@ -4547,15 +4609,47 @@ type UIDataPartSchemas = Record<string, Validator<any> | StandardSchemaV1<any>>;
 type InferUIDataParts<T extends UIDataPartSchemas> = {
     [K in keyof T]: T[K] extends Validator<infer U> ? U : T[K] extends StandardSchemaV1<infer U> ? U : unknown;
 };
-
+type ChatFactory<MESSAGE_METADATA, DATA_TYPES extends UIDataTypes> = (options: {
+    messages?: UIMessage<MESSAGE_METADATA, DATA_TYPES>[];
+}) => Chat<MESSAGE_METADATA, DATA_TYPES>;
+type ChatStoreOptions<MESSAGE_METADATA, DATA_PART_SCHEMAS extends UIDataPartSchemas> = {
+    chats?: {
+        [id: string]: {
+            messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<DATA_PART_SCHEMAS>>[];
+        };
+    };
+    generateId?: UseChatOptions['generateId'];
+    transport: ChatTransport<MESSAGE_METADATA, InferUIDataParts<DATA_PART_SCHEMAS>>;
+    maxSteps?: number;
+    messageMetadataSchema?: Validator<MESSAGE_METADATA> | StandardSchemaV1<MESSAGE_METADATA>;
+    dataPartSchemas?: DATA_PART_SCHEMAS;
+};
+type ChatStoreFactory<MESSAGE_METADATA, DATA_PART_SCHEMAS extends UIDataPartSchemas> = (options: ChatStoreOptions<MESSAGE_METADATA, DATA_PART_SCHEMAS>) => ChatStore<MESSAGE_METADATA, DATA_PART_SCHEMAS>;
+interface Chat<MESSAGE_METADATA, DATA_TYPES extends UIDataTypes> {
+    readonly status: ChatStatus;
+    readonly messages: UIMessage<MESSAGE_METADATA, DATA_TYPES>[];
+    readonly error: Error | undefined;
+    readonly activeResponse: ActiveResponse<MESSAGE_METADATA> | undefined;
+    readonly jobExecutor: SerialJobExecutor;
+    setStatus: (status: ChatStatus) => void;
+    setError: (error: Error | undefined) => void;
+    setActiveResponse: (activeResponse: ActiveResponse<MESSAGE_METADATA> | undefined) => void;
+    pushMessage: (message: UIMessage<MESSAGE_METADATA, DATA_TYPES>) => void;
+    popMessage: () => void;
+    replaceMessage: (index: number, message: UIMessage<MESSAGE_METADATA, DATA_TYPES>) => void;
+    setMessages: (messages: UIMessage<MESSAGE_METADATA, DATA_TYPES>[]) => void;
+    snapshot?: <T>(thing: T) => T;
+}
+declare class ChatStore<MESSAGE_METADATA = unknown, UI_DATA_PART_SCHEMAS extends UIDataPartSchemas = UIDataPartSchemas> {
     private chats;
+    private readonly createChat;
     private subscribers;
     private generateId;
     private messageMetadataSchema;
     private dataPartSchemas;
     private transport;
     private maxSteps;
-    constructor({ chats, generateId, transport, maxSteps, messageMetadataSchema, dataPartSchemas, }: {
+    constructor({ chats, generateId, transport, maxSteps, messageMetadataSchema, dataPartSchemas, createChat, }: {
         chats?: {
             [id: string]: {
                 messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[];
@@ -4566,6 +4660,7 @@ declare class ChatStore<MESSAGE_METADATA, UI_DATA_PART_SCHEMAS extends UIDataPar
         maxSteps?: number;
         messageMetadataSchema?: Validator<MESSAGE_METADATA> | StandardSchemaV1<MESSAGE_METADATA>;
         dataPartSchemas?: UI_DATA_PART_SCHEMAS;
+        createChat: ChatFactory<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>;
     });
     hasChat(id: string): boolean;
     addChat(id: string, messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[]): void;
@@ -4574,7 +4669,7 @@ declare class ChatStore<MESSAGE_METADATA, UI_DATA_PART_SCHEMAS extends UIDataPar
     getStatus(id: string): ChatStatus;
     setStatus({ id, status, error, }: {
         id: string;
-        status:
+        status: ChatStatus;
         error?: Error;
     }): void;
     getError(id: string): Error | undefined;
@@ -4605,7 +4700,7 @@ declare class ChatStore<MESSAGE_METADATA, UI_DATA_PART_SCHEMAS extends UIDataPar
         chatId: string;
     }): Promise<void>;
     private emit;
-    private
+    private getChatState;
     private triggerRequest;
 }
 
@@ -4658,9 +4753,21 @@ type UseChatOptions<MESSAGE_METADATA = unknown, DATA_TYPE_SCHEMAS extends UIData
      */
     generateId?: IdGenerator;
     /**
-     *
+     * Chat store that should be used.
+     * It must not change during the component lifecycle.
+     *
+     * When a ChatStore is provided, it will be used as is.
+     * It should be stable and the stability is guaranteed by the user.
+     *
+     * When a function is provided, it will be called to create a new chat store.
+     * The function will be called when the hook is mounted and the chat store will be
+     * created.
+     * The function will be called with the same arguments as the hook is called with.
+     * The function should return a ChatStoreOptions object.
+     *
+     * When no value is provided, a default chat store will be created.
      */
-    chatStore?: ChatStore<MESSAGE_METADATA, DATA_TYPE_SCHEMAS
+    chatStore?: ChatStore<MESSAGE_METADATA, DATA_TYPE_SCHEMAS> | (() => ChatStoreOptions<MESSAGE_METADATA, DATA_TYPE_SCHEMAS>);
 };
 type OriginalUseChatOptions<MESSAGE_METADATA = unknown> = {
     /**
@@ -4808,84 +4915,6 @@ declare function convertToModelMessages<TOOLS extends ToolSet = never>(messages:
  */
 declare const convertToCoreMessages: typeof convertToModelMessages;
 
-declare function defaultChatStore<MESSAGE_METADATA, UI_DATA_PART_SCHEMAS extends UIDataPartSchemas>({ api, fetch, credentials, headers, body, prepareRequestBody, generateId, dataPartSchemas, messageMetadataSchema, maxSteps, chats, }: {
-    /**
-     * Schema for the message metadata. Validates the message metadata.
-     * Message metadata can be undefined or must match the schema.
-     */
-    messageMetadataSchema?: Validator<MESSAGE_METADATA> | StandardSchemaV1<MESSAGE_METADATA>;
-    /**
-     * Schema for the data types. Validates the data types.
-     */
-    dataPartSchemas?: UI_DATA_PART_SCHEMAS;
-    /**
-     * The API endpoint that accepts a `{ messages: Message[] }` object and returns
-     * a stream of tokens of the AI chat response.
-     */
-    api: string;
-    /**
-     * A way to provide a function that is going to be used for ids for messages and the chat.
-     * If not provided the default AI SDK `generateId` is used.
-     */
-    generateId?: IdGenerator;
-    /**
-     * The credentials mode to be used for the fetch request.
-     * Possible values are: 'omit', 'same-origin', 'include'.
-     * Defaults to 'same-origin'.
-     */
-    credentials?: RequestCredentials;
-    /**
-     * HTTP headers to be sent with the API request.
-     */
-    headers?: Record<string, string> | Headers;
-    /**
-     * Extra body object to be sent with the API request.
-     * @example
-     * Send a `sessionId` to the API along with the messages.
-     * ```js
-     * useChat({
-     *   body: {
-     *     sessionId: '123',
-     *   }
-     * })
-     * ```
-     */
-    body?: object;
-    /**
-Custom fetch implementation. You can use it as a middleware to intercept requests,
-or to provide a custom fetch implementation for e.g. testing.
-     */
-    fetch?: FetchFunction;
-    /**
-Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
-Must be at least 1.
-
-A maximum number is required to prevent infinite loops in the case of misconfigured tools.
-
-By default, it's set to 1, which means that only a single LLM call is made.
-     */
-    maxSteps?: number;
-    /**
-     * When a function is provided, it will be used
-     * to prepare the request body for the chat API. This can be useful for
-     * customizing the request body based on the messages and data in the chat.
-     *
-     * @param chatId The id of the chat.
-     * @param messages The current messages in the chat.
-     * @param requestBody The request body object passed in the chat request.
-     */
-    prepareRequestBody?: (options: {
-        chatId: string;
-        messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[];
-        requestBody?: object;
-    }) => unknown;
-    chats?: {
-        [id: string]: {
-            messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[];
-        };
-    };
-}): ChatStore<MESSAGE_METADATA, UI_DATA_PART_SCHEMAS>;
-
 declare function extractMaxToolInvocationStep(toolInvocations: ToolInvocation[] | undefined): number | undefined;
 
 declare function getToolInvocations(message: UIMessage): ToolInvocation[];
@@ -4992,4 +5021,85 @@ type UseCompletionOptions = {
     fetch?: FetchFunction;
 };
 
-
+interface DefaultChatStoreOptions<MESSAGE_METADATA = unknown, UI_DATA_PART_SCHEMAS extends UIDataPartSchemas = UIDataPartSchemas> {
+    /**
+     * Schema for the message metadata. Validates the message metadata.
+     * Message metadata can be undefined or must match the schema.
+     */
+    messageMetadataSchema?: Validator<MESSAGE_METADATA> | StandardSchemaV1<MESSAGE_METADATA>;
+    /**
+     * Schema for the data types. Validates the data types.
+     */
+    dataPartSchemas?: UI_DATA_PART_SCHEMAS;
+    /**
+     * The API endpoint that accepts a `{ messages: Message[] }` object and returns
+     * a stream of tokens of the AI chat response.
+     *
+     * Defaults to `/api/chat`
+     */
+    api?: string;
+    /**
+     * A way to provide a function that is going to be used for ids for messages and the chat.
+     * If not provided the default AI SDK `generateId` is used.
+     */
+    generateId?: IdGenerator;
+    /**
+     * The credentials mode to be used for the fetch request.
+     * Possible values are: 'omit', 'same-origin', 'include'.
+     * Defaults to 'same-origin'.
+     */
+    credentials?: RequestCredentials;
+    /**
+     * HTTP headers to be sent with the API request.
+     */
+    headers?: Record<string, string> | Headers;
+    /**
+     * Extra body object to be sent with the API request.
+     * @example
+     * Send a `sessionId` to the API along with the messages.
+     * ```js
+     * useChat({
+     *   body: {
+     *     sessionId: '123',
+     *   }
+     * })
+     * ```
+     */
+    body?: object;
+    /**
+Custom fetch implementation. You can use it as a middleware to intercept requests,
+or to provide a custom fetch implementation for e.g. testing.
+     */
+    fetch?: FetchFunction;
+    /**
+Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
+Must be at least 1.
+
+A maximum number is required to prevent infinite loops in the case of misconfigured tools.
+
+By default, it's set to 1, which means that only a single LLM call is made.
+     */
+    maxSteps?: number;
+    /**
+     * When a function is provided, it will be used
+     * to prepare the request body for the chat API. This can be useful for
+     * customizing the request body based on the messages and data in the chat.
+     *
+     * @param chatId The id of the chat.
+     * @param messages The current messages in the chat.
+     * @param requestBody The request body object passed in the chat request.
+     */
+    prepareRequestBody?: (options: {
+        chatId: string;
+        messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[];
+        requestBody?: object;
+    }) => unknown;
+    chats?: {
+        [id: string]: {
+            messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[];
+        };
+    };
+}
+declare function defaultChatStoreOptions<MESSAGE_METADATA = unknown, UI_DATA_PART_SCHEMAS extends UIDataPartSchemas = UIDataPartSchemas>({ api, fetch, credentials, headers, body, prepareRequestBody, generateId, messageMetadataSchema, maxSteps, dataPartSchemas, chats, }: DefaultChatStoreOptions<MESSAGE_METADATA, UI_DATA_PART_SCHEMAS>): () => ChatStoreOptions<MESSAGE_METADATA, UI_DATA_PART_SCHEMAS>;
+
+export { ActiveResponse, AssistantContent, AssistantModelMessage, CallSettings, CallWarning, Chat, ChatRequestOptions, ChatStatus, ChatStore, ChatStoreEvent, ChatStoreFactory, ChatStoreOptions, ChatTransport, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataContent, DataUIPart, DeepPartial, DefaultChatStoreOptions, DefaultChatTransport, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, ImagePart, InferUIDataParts, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MCPClientError, MCPTransport, MessageConversionError, ModelMessage, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, OriginalUseChatOptions, output as Output, PrepareStepFunction, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RetryError, SerialJobExecutor, SourceUrlUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, SystemModelMessage, TelemetrySettings, TextPart, TextStreamChatTransport, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolModelMessage, ToolResultPart, ToolResultUnion, ToolSet, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIDataPartSchemas, UIDataTypes, UIMessage, UIMessagePart, UIMessageStreamOptions, UIMessageStreamPart, UIMessageStreamWriter, UseChatOptions, UseCompletionOptions, UserContent, UserModelMessage, appendClientMessage, assistantModelMessageSchema, callChatApi, callCompletionApi, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultChatStoreOptions, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractMaxToolInvocationStep, extractReasoningMiddleware, generateObject, generateText, getTextFromDataUrl, getToolInvocations, hasToolCall, isAssistantMessageWithCompletedToolCalls, isDeepEqualData, modelMessageSchema, parsePartialJson, pipeTextStreamToResponse, pipeUIMessageStreamToResponse, shouldResubmitMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, stepCountIs, streamObject, streamText, systemModelMessageSchema, tool, toolModelMessageSchema, updateToolCallResult, userModelMessageSchema, wrapLanguageModel };
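A sketch of the new `defaultChatStoreOptions` helper together with the `chatStore` option on `UseChatOptions`; the `useChat` import path is an assumption (a framework binding such as `@ai-sdk/react`), since only the core `ai` package appears in this diff:

```ts
import { defaultChatStoreOptions } from 'ai';

// defaultChatStoreOptions replaces the removed defaultChatStore helper: it returns
// a `() => ChatStoreOptions` factory, which is one of the two shapes accepted by
// the new `chatStore` option on UseChatOptions (the other is a ChatStore instance).
const chatStore = defaultChatStoreOptions({
  api: '/api/chat', // now optional, defaults to `/api/chat`
  maxSteps: 3,
});

// Assumption: `useChat` lives in a framework binding such as @ai-sdk/react,
// which is outside this diff:
// const chat = useChat({ chatStore });
```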