ai 5.0.0-alpha.4 → 5.0.0-alpha.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +27 -0
- package/dist/index.d.mts +226 -169
- package/dist/index.d.ts +226 -169
- package/dist/index.js +291 -229
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +295 -230
- package/dist/index.mjs.map +1 -1
- package/package.json +4 -3
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,32 @@
 # ai
 
+## 5.0.0-alpha.6
+
+### Patch Changes
+
+- 0d2c085: feat (ai): support string model ids through gateway
+- 48a7606: feat (ai): support changing the system prompt in prepareSteps
+- Updated dependencies [0d2c085]
+- Updated dependencies [6c2c708]
+  - @ai-sdk/provider@2.0.0-alpha.6
+  - @ai-sdk/gateway@1.0.0-alpha.6
+  - @ai-sdk/provider-utils@3.0.0-alpha.6
+
+## 5.0.0-alpha.5
+
+### Major Changes
+
+- ef256ed: chore (ai): refactor and use chatstore in svelte
+- 1ed0287: chore (ai): stable sendStart/sendFinish options
+
+### Patch Changes
+
+- 655cf3c: feat (ui): add onFinish to createUIMessageStream
+- 1675396: fix: avoid job executor deadlock when adding tool result
+- cf9af6e: feat (ai): allow sync prepareStep
+- 825e8d7: release alpha.5
+- 7324c21: fix (ai/telemetry): Avoid JSON.stringify on Uint8Arrays for telemetry
+
 ## 5.0.0-alpha.4
 
 ### Major Changes
package/dist/index.d.mts
CHANGED
@@ -2,6 +2,7 @@ import { ToolResultContent, Schema, ToolCall, ToolResult, IdGenerator, FetchFunc
 export { IdGenerator, Schema, ToolCall, ToolResult, asSchema, createIdGenerator, generateId, jsonSchema } from '@ai-sdk/provider-utils';
 import { AISDKError, SharedV2ProviderMetadata, SharedV2ProviderOptions, EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV2, ImageModelV2CallWarning, ImageModelV2ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2CallWarning, LanguageModelV2Source, SpeechModelV1, SpeechModelV1CallWarning, TranscriptionModelV1, TranscriptionModelV1CallWarning, LanguageModelV2Usage, JSONObject, LanguageModelV2ToolCall, JSONSchema7, LanguageModelV2CallOptions, JSONParseError, TypeValidationError, LanguageModelV2Middleware, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
+import { GatewayModelId } from '@ai-sdk/gateway';
 import { z } from 'zod';
 import { ServerResponse } from 'node:http';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
@@ -382,7 +383,7 @@ type JSONValue = JSONValue$1;
 /**
 Language model that is used by the AI SDK Core functions.
  */
-type LanguageModel = LanguageModelV2;
+type LanguageModel = GatewayModelId | LanguageModelV2;
 /**
 Reason why a language model finished generating a response.
 
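With `LanguageModel` widened to `GatewayModelId | LanguageModelV2`, a plain string id is now accepted wherever a model instance was previously required and is resolved through `@ai-sdk/gateway` (changelog entry 0d2c085). A minimal sketch; the concrete model id and prompt are placeholders, not taken from this diff:

```ts
import { generateText } from 'ai';

// A string model id is resolved through the gateway ('openai/gpt-4o-mini' is
// a hypothetical example id, not confirmed by this diff).
const { text } = await generateText({
  model: 'openai/gpt-4o-mini',
  prompt: 'Summarize the 5.0.0-alpha.6 changes in one sentence.',
});

console.log(text);
```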
@@ -2693,6 +2694,29 @@ declare namespace output {
 };
 }
 
+/**
+Function that you can use to provide different settings for a step.
+
+@param options - The options for the step.
+@param options.steps - The steps that have been executed so far.
+@param options.stepNumber - The number of the step that is being executed.
+@param options.model - The model that is being used.
+
+@returns An object that contains the settings for the step.
+If you return undefined (or for undefined settings), the settings from the outer level will be used.
+ */
+type PrepareStepFunction<TOOLS extends Record<string, Tool> = Record<string, Tool>> = (options: {
+    steps: Array<StepResult<NoInfer<TOOLS>>>;
+    stepNumber: number;
+    model: LanguageModel;
+}) => PromiseLike<PrepareStepResult<TOOLS>> | PrepareStepResult<TOOLS>;
+type PrepareStepResult<TOOLS extends Record<string, Tool> = Record<string, Tool>> = {
+    model?: LanguageModel;
+    toolChoice?: ToolChoice<NoInfer<TOOLS>>;
+    activeTools?: Array<keyof NoInfer<TOOLS>>;
+    system?: string;
+} | undefined;
+
 type StopCondition<TOOLS extends ToolSet> = (options: {
     steps: Array<StepResult<TOOLS>>;
 }) => PromiseLike<boolean> | boolean;
@@ -2751,7 +2775,7 @@ If set and supported by the model, calls will generate deterministic results.
 @returns
 A result object that contains the generated text, the results of the tool calls, and additional information.
  */
-declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, stopWhen, experimental_output: output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools, activeTools, experimental_prepareStep, prepareStep, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
+declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model: modelArg, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, stopWhen, experimental_output: output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools, activeTools, experimental_prepareStep, prepareStep, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
     /**
     The language model to use.
      */
@@ -2797,35 +2821,11 @@ Optional specification for parsing structured outputs from the LLM response.
     /**
      * @deprecated Use `prepareStep` instead.
      */
-    experimental_prepareStep?: (options: {
-        steps: Array<StepResult<NoInfer<TOOLS>>>;
-        stepNumber: number;
-        model: LanguageModel;
-    }) => PromiseLike<{
-        model?: LanguageModel;
-        toolChoice?: ToolChoice<NoInfer<TOOLS>>;
-        activeTools?: Array<keyof NoInfer<TOOLS>>;
-    } | undefined>;
+    experimental_prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
     /**
     Optional function that you can use to provide different settings for a step.
-
-    @param options - The options for the step.
-    @param options.steps - The steps that have been executed so far.
-    @param options.stepNumber - The number of the step that is being executed.
-    @param options.model - The model that is being used.
-
-    @returns An object that contains the settings for the step.
-    If you return undefined (or for undefined settings), the settings from the outer level will be used.
      */
-    prepareStep?: (options: {
-        steps: Array<StepResult<NoInfer<TOOLS>>>;
-        stepNumber: number;
-        model: LanguageModel;
-    }) => PromiseLike<{
-        model?: LanguageModel;
-        toolChoice?: ToolChoice<NoInfer<TOOLS>>;
-        activeTools?: Array<keyof NoInfer<TOOLS>>;
-    } | undefined>;
+    prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
     /**
     A function that attempts to repair a tool call that failed to parse.
      */
@@ -2899,7 +2899,7 @@ type UIMessageStreamOptions = {
     * that send additional data.
     * Default to true.
     */
-    experimental_sendFinish?: boolean;
+    sendFinish?: boolean;
     /**
     * Send the message start event to the client.
     * Set to false if you are using additional streamText calls
@@ -2911,7 +2911,7 @@ type UIMessageStreamOptions = {
     * streamText calls that send additional data to prevent
     * the message start event from being sent multiple times.
     */
-    experimental_sendStart?: boolean;
+    sendStart?: boolean;
     /**
     * Process an error, e.g. to log it. Default to `() => 'An error occurred.'`.
     *
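`sendStart`/`sendFinish` lose their `experimental_` prefix (alpha.5: stable sendStart/sendFinish options). They matter when one UI message is assembled from several `streamText` calls. A hedged sketch: it assumes the `streamText` result exposes a `toUIMessageStream(options: UIMessageStreamOptions)` method, which is not part of this diff:

```ts
import { streamText, type LanguageModel } from 'ai';

declare const model: LanguageModel; // placeholder

const first = streamText({ model, prompt: 'Write the introduction.' });
const second = streamText({ model, prompt: 'Write the conclusion.' });

// Assumption: toUIMessageStream(options) exists on the streamText result.
// The first stream opens the UI message but must not emit the finish event...
const head = first.toUIMessageStream({ sendFinish: false });
// ...the continuation suppresses the start event so the client sees a single message.
const tail = second.toUIMessageStream({ sendStart: false });
```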
@@ -3317,15 +3317,7 @@ Optional function that you can use to provide different settings for a step.
     @returns An object that contains the settings for the step.
     If you return undefined (or for undefined settings), the settings from the outer level will be used.
      */
-    prepareStep?: (options: {
-        steps: Array<StepResult<NoInfer<TOOLS>>>;
-        stepNumber: number;
-        model: LanguageModel;
-    }) => PromiseLike<{
-        model?: LanguageModel;
-        toolChoice?: ToolChoice<NoInfer<TOOLS>>;
-        activeTools?: Array<keyof NoInfer<TOOLS>>;
-    } | undefined>;
+    prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
     /**
     A function that attempts to repair a tool call that failed to parse.
      */
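Since `PrepareStepFunction` is now exported, a step policy can be typed once and shared between `generateText` and `streamText`. A small sketch; the fallback model id is a hypothetical gateway id:

```ts
import type { PrepareStepFunction } from 'ai';

// Reusable policy: after the first step, switch to a cheaper model and keep
// every other setting from the outer call. The id is a hypothetical gateway id.
export const stepPolicy: PrepareStepFunction = ({ stepNumber }) =>
  stepNumber > 0 ? { model: 'openai/gpt-4o-mini' } : undefined;
```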
@@ -3670,6 +3662,20 @@ functionality that can be fully encapsulated in the provider.
     };
 }): Promise<GenerateObjectResult<RESULT>>;
 
+/**
+ * Calculates the cosine similarity between two vectors. This is a useful metric for
+ * comparing the similarity of two vectors such as embeddings.
+ *
+ * @param vector1 - The first vector.
+ * @param vector2 - The second vector.
+ *
+ * @returns The cosine similarity between vector1 and vector2.
+ * @returns 0 if either vector is the zero vector.
+ *
+ * @throws {InvalidArgumentError} If the vectors do not have the same length.
+ */
+declare function cosineSimilarity(vector1: number[], vector2: number[]): number;
+
 /**
  * Converts a data URL of type text/* to a text string.
  */
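`cosineSimilarity` itself is unchanged; it only moves up in the file (see the matching removal in the next hunk). A quick usage sketch with literal vectors standing in for embeddings:

```ts
import { cosineSimilarity } from 'ai';

// Literal vectors stand in for embeddings returned by embed()/embedMany().
const a = [0.1, 0.4, 0.9];
const b = [0.2, 0.3, 0.8];

console.log(cosineSimilarity(a, b)); // close to 1 for vectors pointing the same way
// Per the JSDoc above: a zero vector yields 0, and vectors of different
// lengths throw an InvalidArgumentError.
```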
@@ -3689,19 +3695,14 @@ declare function parsePartialJson(jsonText: string | undefined): Promise<{
     state: 'undefined-input' | 'successful-parse' | 'repaired-parse' | 'failed-parse';
 }>;
 
-/**
- * Calculates the cosine similarity between two vectors. This is a useful metric for
- * comparing the similarity of two vectors such as embeddings.
- *
- * @param vector1 - The first vector.
- * @param vector2 - The second vector.
- *
- * @returns The cosine similarity between vector1 and vector2.
- * @returns 0 if either vector is the zero vector.
- *
- * @throws {InvalidArgumentError} If the vectors do not have the same length.
- */
-declare function cosineSimilarity(vector1: number[], vector2: number[]): number;
+type Job = () => Promise<void>;
+
+declare class SerialJobExecutor {
+    private queue;
+    private isProcessing;
+    private processQueue;
+    run(job: Job): Promise<void>;
+}
 
 /**
  * Creates a ReadableStream that emits the provided values with an optional delay between each value.
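`SerialJobExecutor` moves into this part of the file and is exported (see the export list at the end of this diff); it runs queued jobs strictly one at a time, which is the mechanism behind the alpha.5 fix "avoid job executor deadlock when adding tool result". A minimal sketch based only on the declared `run(job)` API:

```ts
import { SerialJobExecutor } from 'ai';

const executor = new SerialJobExecutor();

// Jobs are () => Promise<void>; even when scheduled concurrently they run in order.
await Promise.all([
  executor.run(async () => {
    console.log('job 1 start');
    await new Promise((resolve) => setTimeout(resolve, 100));
    console.log('job 1 end');
  }),
  executor.run(async () => {
    console.log('job 2 runs only after job 1 has finished');
  }),
]);
```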
@@ -4160,15 +4161,15 @@ declare const wrapLanguageModel: ({ model, middleware: middlewareArg, modelId, p
  *
  * @throws {NoSuchModelError} Throws when a requested model is not found and no fallback provider is available.
  */
-declare function customProvider<LANGUAGE_MODELS extends Record<string,
+declare function customProvider<LANGUAGE_MODELS extends Record<string, LanguageModelV2>, EMBEDDING_MODELS extends Record<string, EmbeddingModelV2<string>>, IMAGE_MODELS extends Record<string, ImageModelV2>>({ languageModels, textEmbeddingModels, imageModels, fallbackProvider, }: {
     languageModels?: LANGUAGE_MODELS;
     textEmbeddingModels?: EMBEDDING_MODELS;
     imageModels?: IMAGE_MODELS;
     fallbackProvider?: ProviderV2;
-}):
-    languageModel(modelId: ExtractModelId<LANGUAGE_MODELS>):
-    textEmbeddingModel(modelId: ExtractModelId<EMBEDDING_MODELS>):
-    imageModel(modelId: ExtractModelId<IMAGE_MODELS>):
+}): ProviderV2 & {
+    languageModel(modelId: ExtractModelId<LANGUAGE_MODELS>): LanguageModelV2;
+    textEmbeddingModel(modelId: ExtractModelId<EMBEDDING_MODELS>): EmbeddingModelV2<string>;
+    imageModel(modelId: ExtractModelId<IMAGE_MODELS>): ImageModelV2;
 };
 /**
  * @deprecated Use `customProvider` instead.
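`customProvider` now returns `ProviderV2` with the concrete V2 model types, since the `LanguageModel` alias can be a plain string id after this release. A sketch that aliases an existing `LanguageModelV2` instance; the alias name and the `withAlias` helper are illustrative only:

```ts
import { customProvider } from 'ai';
import type { LanguageModelV2 } from '@ai-sdk/provider';

// Hypothetical helper: expose a concrete model instance under a stable alias.
export function withAlias(model: LanguageModelV2) {
  return customProvider({
    languageModels: { 'chat-default': model },
  });
}

// withAlias(someModel).languageModel('chat-default') yields a LanguageModelV2,
// matching the updated return type above.
```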
@@ -4193,12 +4194,12 @@ declare class NoSuchProviderError extends NoSuchModelError {
 
 type ExtractLiteralUnion<T> = T extends string ? string extends T ? never : T : never;
 interface ProviderRegistryProvider<PROVIDERS extends Record<string, ProviderV2> = Record<string, ProviderV2>, SEPARATOR extends string = ':'> {
-    languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['languageModel']>>[0]>}` : never):
-    languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never):
-    textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['textEmbeddingModel']>>[0]>}` : never):
-    textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never):
-    imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['imageModel']>>[0]>}` : never):
-    imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never):
+    languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['languageModel']>>[0]>}` : never): LanguageModelV2;
+    languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): LanguageModelV2;
+    textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['textEmbeddingModel']>>[0]>}` : never): EmbeddingModelV2<string>;
+    textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): EmbeddingModelV2<string>;
+    imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['imageModel']>>[0]>}` : never): ImageModelV2;
+    imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): ImageModelV2;
 }
 /**
  * Creates a registry for the given providers.
@@ -4379,15 +4380,6 @@ type UIMessageStreamPart = {
     type: 'reasoning-part-finish';
 };
 
-type Job = () => Promise<void>;
-
-declare class SerialJobExecutor {
-    private queue;
-    private isProcessing;
-    private processQueue;
-    run(job: Job): Promise<void>;
-}
-
 interface UIMessageStreamWriter {
     /**
      * Appends a data stream part to the stream.
@@ -4405,9 +4397,32 @@ interface UIMessageStreamWriter {
     onError: ((error: unknown) => string) | undefined;
 }
 
-declare function createUIMessageStream({ execute, onError, }: {
-    execute: (writer: UIMessageStreamWriter) => Promise<void> | void;
+declare function createUIMessageStream({ execute, onError, // mask error messages for safety by default
+originalMessages, onFinish, }: {
+    execute: (options: {
+        writer: UIMessageStreamWriter;
+    }) => Promise<void> | void;
     onError?: (error: unknown) => string;
+    /**
+     * The original messages.
+     */
+    originalMessages?: UIMessage[];
+    onFinish?: (options: {
+        /**
+         * The updates list of UI messages.
+         */
+        messages: UIMessage[];
+        /**
+         * Indicates whether the response message is a continuation of the last original message,
+         * or if a new message was created.
+         */
+        isContinuation: boolean;
+        /**
+         * The message that was sent to the client as a response
+         * (including the original message if it was extended).
+         */
+        responseMessage: UIMessage;
+    }) => void;
 }): ReadableStream<UIMessageStreamPart>;
 
 declare function createUIMessageStreamResponse({ status, statusText, headers, stream, }: ResponseInit & {
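`createUIMessageStream` gains `originalMessages` and an `onFinish` callback (alpha.5: add onFinish to createUIMessageStream), and `execute` now receives an options object with a `writer` property instead of the writer directly. A sketch using only the shapes declared above; how parts are written through the writer is omitted because the writer's methods are not part of this hunk:

```ts
import { createUIMessageStream } from 'ai';

const stream = createUIMessageStream({
  execute: async ({ writer }) => {
    // stream UI message parts through `writer` here
  },
  onError: () => 'An error occurred.', // masked message sent to the client
  onFinish: ({ messages, isContinuation, responseMessage }) => {
    // e.g. persist the updated message list once streaming completes
    console.log({ count: messages.length, isContinuation, id: responseMessage.id });
  },
});
```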
@@ -4564,16 +4579,10 @@ interface ChatStoreEvent {
     error?: Error;
 }
 type ChatStatus = 'submitted' | 'streaming' | 'ready' | 'error';
-
-
-
-
-    activeResponse?: {
-        state: StreamingUIMessageState<MESSAGE_METADATA>;
-        abortController?: AbortController;
-    };
-    jobExecutor: SerialJobExecutor;
-}
+type ActiveResponse<MESSAGE_METADATA> = {
+    state: StreamingUIMessageState<MESSAGE_METADATA>;
+    abortController: AbortController | undefined;
+};
 type ExtendedCallOptions<MESSAGE_METADATA, DATA_TYPES extends UIDataTypes> = ChatRequestOptions & {
     onError?: (error: Error) => void;
     /**
@@ -4599,15 +4608,47 @@ type UIDataPartSchemas = Record<string, Validator<any> | StandardSchemaV1<any>>;
 type InferUIDataParts<T extends UIDataPartSchemas> = {
     [K in keyof T]: T[K] extends Validator<infer U> ? U : T[K] extends StandardSchemaV1<infer U> ? U : unknown;
 };
-declare class ChatStore<MESSAGE_METADATA, UI_DATA_PART_SCHEMAS extends UIDataPartSchemas> {
+type ChatFactory<MESSAGE_METADATA, DATA_TYPES extends UIDataTypes> = (options: {
+    messages?: UIMessage<MESSAGE_METADATA, DATA_TYPES>[];
+}) => Chat<MESSAGE_METADATA, DATA_TYPES>;
+type ChatStoreOptions<MESSAGE_METADATA, DATA_PART_SCHEMAS extends UIDataPartSchemas> = {
+    chats?: {
+        [id: string]: {
+            messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<DATA_PART_SCHEMAS>>[];
+        };
+    };
+    generateId?: UseChatOptions['generateId'];
+    transport: ChatTransport<MESSAGE_METADATA, InferUIDataParts<DATA_PART_SCHEMAS>>;
+    maxSteps?: number;
+    messageMetadataSchema?: Validator<MESSAGE_METADATA> | StandardSchemaV1<MESSAGE_METADATA>;
+    dataPartSchemas?: DATA_PART_SCHEMAS;
+};
+type ChatStoreFactory<MESSAGE_METADATA, DATA_PART_SCHEMAS extends UIDataPartSchemas> = (options: ChatStoreOptions<MESSAGE_METADATA, DATA_PART_SCHEMAS>) => ChatStore<MESSAGE_METADATA, DATA_PART_SCHEMAS>;
+interface Chat<MESSAGE_METADATA, DATA_TYPES extends UIDataTypes> {
+    readonly status: ChatStatus;
+    readonly messages: UIMessage<MESSAGE_METADATA, DATA_TYPES>[];
+    readonly error: Error | undefined;
+    readonly activeResponse: ActiveResponse<MESSAGE_METADATA> | undefined;
+    readonly jobExecutor: SerialJobExecutor;
+    setStatus: (status: ChatStatus) => void;
+    setError: (error: Error | undefined) => void;
+    setActiveResponse: (activeResponse: ActiveResponse<MESSAGE_METADATA> | undefined) => void;
+    pushMessage: (message: UIMessage<MESSAGE_METADATA, DATA_TYPES>) => void;
+    popMessage: () => void;
+    replaceMessage: (index: number, message: UIMessage<MESSAGE_METADATA, DATA_TYPES>) => void;
+    setMessages: (messages: UIMessage<MESSAGE_METADATA, DATA_TYPES>[]) => void;
+    snapshot?: <T>(thing: T) => T;
+}
+declare class ChatStore<MESSAGE_METADATA = unknown, UI_DATA_PART_SCHEMAS extends UIDataPartSchemas = UIDataPartSchemas> {
     private chats;
+    private readonly createChat;
     private subscribers;
     private generateId;
     private messageMetadataSchema;
     private dataPartSchemas;
     private transport;
     private maxSteps;
-    constructor({ chats, generateId, transport, maxSteps, messageMetadataSchema, dataPartSchemas, }: {
+    constructor({ chats, generateId, transport, maxSteps, messageMetadataSchema, dataPartSchemas, createChat, }: {
         chats?: {
             [id: string]: {
                 messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[];
@@ -4618,6 +4659,7 @@ declare class ChatStore<MESSAGE_METADATA, UI_DATA_PART_SCHEMAS extends UIDataPar
         maxSteps?: number;
         messageMetadataSchema?: Validator<MESSAGE_METADATA> | StandardSchemaV1<MESSAGE_METADATA>;
         dataPartSchemas?: UI_DATA_PART_SCHEMAS;
+        createChat: ChatFactory<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>;
     });
     hasChat(id: string): boolean;
     addChat(id: string, messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[]): void;
@@ -4626,7 +4668,7 @@ declare class ChatStore<MESSAGE_METADATA, UI_DATA_PART_SCHEMAS extends UIDataPar
     getStatus(id: string): ChatStatus;
     setStatus({ id, status, error, }: {
         id: string;
-        status:
+        status: ChatStatus;
         error?: Error;
     }): void;
     getError(id: string): Error | undefined;
@@ -4657,7 +4699,7 @@ declare class ChatStore<MESSAGE_METADATA, UI_DATA_PART_SCHEMAS extends UIDataPar
         chatId: string;
     }): Promise<void>;
     private emit;
-    private
+    private getChatState;
     private triggerRequest;
 }
 
@@ -4710,9 +4752,21 @@ type UseChatOptions<MESSAGE_METADATA = unknown, DATA_TYPE_SCHEMAS extends UIData
      */
     generateId?: IdGenerator;
     /**
-     *
+     * Chat store that should be used.
+     * It must not change during the component lifecycle.
+     *
+     * When a ChatStore is provided, it will be used as is.
+     * It should be stable and the stability is guaranteed by the user.
+     *
+     * When a function is provided, it will be called to create a new chat store.
+     * The function will be called when the hook is mounted and the chat store will be
+     * created.
+     * The function will be called with the same arguments as the hook is called with.
+     * The function should return a ChatStoreOptions object.
+     *
+     * When no value is provided, a default chat store will be created.
      */
-    chatStore?: ChatStore<MESSAGE_METADATA, DATA_TYPE_SCHEMAS
+    chatStore?: ChatStore<MESSAGE_METADATA, DATA_TYPE_SCHEMAS> | (() => ChatStoreOptions<MESSAGE_METADATA, DATA_TYPE_SCHEMAS>);
 };
 type OriginalUseChatOptions<MESSAGE_METADATA = unknown> = {
     /**
@@ -4860,84 +4914,6 @@ declare function convertToModelMessages<TOOLS extends ToolSet = never>(messages:
  */
 declare const convertToCoreMessages: typeof convertToModelMessages;
 
-declare function defaultChatStore<MESSAGE_METADATA, UI_DATA_PART_SCHEMAS extends UIDataPartSchemas>({ api, fetch, credentials, headers, body, prepareRequestBody, generateId, dataPartSchemas, messageMetadataSchema, maxSteps, chats, }: {
-    /**
-     * Schema for the message metadata. Validates the message metadata.
-     * Message metadata can be undefined or must match the schema.
-     */
-    messageMetadataSchema?: Validator<MESSAGE_METADATA> | StandardSchemaV1<MESSAGE_METADATA>;
-    /**
-     * Schema for the data types. Validates the data types.
-     */
-    dataPartSchemas?: UI_DATA_PART_SCHEMAS;
-    /**
-     * The API endpoint that accepts a `{ messages: Message[] }` object and returns
-     * a stream of tokens of the AI chat response.
-     */
-    api: string;
-    /**
-     * A way to provide a function that is going to be used for ids for messages and the chat.
-     * If not provided the default AI SDK `generateId` is used.
-     */
-    generateId?: IdGenerator;
-    /**
-     * The credentials mode to be used for the fetch request.
-     * Possible values are: 'omit', 'same-origin', 'include'.
-     * Defaults to 'same-origin'.
-     */
-    credentials?: RequestCredentials;
-    /**
-     * HTTP headers to be sent with the API request.
-     */
-    headers?: Record<string, string> | Headers;
-    /**
-     * Extra body object to be sent with the API request.
-     * @example
-     * Send a `sessionId` to the API along with the messages.
-     * ```js
-     * useChat({
-     *   body: {
-     *     sessionId: '123',
-     *   }
-     * })
-     * ```
-     */
-    body?: object;
-    /**
-    Custom fetch implementation. You can use it as a middleware to intercept requests,
-    or to provide a custom fetch implementation for e.g. testing.
-    */
-    fetch?: FetchFunction;
-    /**
-    Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
-    Must be at least 1.
-
-    A maximum number is required to prevent infinite loops in the case of misconfigured tools.
-
-    By default, it's set to 1, which means that only a single LLM call is made.
-    */
-    maxSteps?: number;
-    /**
-     * When a function is provided, it will be used
-     * to prepare the request body for the chat API. This can be useful for
-     * customizing the request body based on the messages and data in the chat.
-     *
-     * @param chatId The id of the chat.
-     * @param messages The current messages in the chat.
-     * @param requestBody The request body object passed in the chat request.
-     */
-    prepareRequestBody?: (options: {
-        chatId: string;
-        messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[];
-        requestBody?: object;
-    }) => unknown;
-    chats?: {
-        [id: string]: {
-            messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[];
-        };
-    };
-}): ChatStore<MESSAGE_METADATA, UI_DATA_PART_SCHEMAS>;
-
 declare function extractMaxToolInvocationStep(toolInvocations: ToolInvocation[] | undefined): number | undefined;
 
 declare function getToolInvocations(message: UIMessage): ToolInvocation[];
@@ -5044,4 +5020,85 @@ type UseCompletionOptions = {
     fetch?: FetchFunction;
 };
 
-
+interface DefaultChatStoreOptions<MESSAGE_METADATA = unknown, UI_DATA_PART_SCHEMAS extends UIDataPartSchemas = UIDataPartSchemas> {
+    /**
+     * Schema for the message metadata. Validates the message metadata.
+     * Message metadata can be undefined or must match the schema.
+     */
+    messageMetadataSchema?: Validator<MESSAGE_METADATA> | StandardSchemaV1<MESSAGE_METADATA>;
+    /**
+     * Schema for the data types. Validates the data types.
+     */
+    dataPartSchemas?: UI_DATA_PART_SCHEMAS;
+    /**
+     * The API endpoint that accepts a `{ messages: Message[] }` object and returns
+     * a stream of tokens of the AI chat response.
+     *
+     * Defaults to `/api/chat`
+     */
+    api?: string;
+    /**
+     * A way to provide a function that is going to be used for ids for messages and the chat.
+     * If not provided the default AI SDK `generateId` is used.
+     */
+    generateId?: IdGenerator;
+    /**
+     * The credentials mode to be used for the fetch request.
+     * Possible values are: 'omit', 'same-origin', 'include'.
+     * Defaults to 'same-origin'.
+     */
+    credentials?: RequestCredentials;
+    /**
+     * HTTP headers to be sent with the API request.
+     */
+    headers?: Record<string, string> | Headers;
+    /**
+     * Extra body object to be sent with the API request.
+     * @example
+     * Send a `sessionId` to the API along with the messages.
+     * ```js
+     * useChat({
+     *   body: {
+     *     sessionId: '123',
+     *   }
+     * })
+     * ```
+     */
+    body?: object;
+    /**
+    Custom fetch implementation. You can use it as a middleware to intercept requests,
+    or to provide a custom fetch implementation for e.g. testing.
+    */
+    fetch?: FetchFunction;
+    /**
+    Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
+    Must be at least 1.
+
+    A maximum number is required to prevent infinite loops in the case of misconfigured tools.
+
+    By default, it's set to 1, which means that only a single LLM call is made.
+    */
+    maxSteps?: number;
+    /**
+     * When a function is provided, it will be used
+     * to prepare the request body for the chat API. This can be useful for
+     * customizing the request body based on the messages and data in the chat.
+     *
+     * @param chatId The id of the chat.
+     * @param messages The current messages in the chat.
+     * @param requestBody The request body object passed in the chat request.
+     */
+    prepareRequestBody?: (options: {
+        chatId: string;
+        messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[];
+        requestBody?: object;
+    }) => unknown;
+    chats?: {
+        [id: string]: {
+            messages: UIMessage<MESSAGE_METADATA, InferUIDataParts<UI_DATA_PART_SCHEMAS>>[];
+        };
+    };
+}
+declare function defaultChatStoreOptions<MESSAGE_METADATA = unknown, UI_DATA_PART_SCHEMAS extends UIDataPartSchemas = UIDataPartSchemas>({ api, fetch, credentials, headers, body, prepareRequestBody, generateId, messageMetadataSchema, maxSteps, dataPartSchemas, chats, }: DefaultChatStoreOptions<MESSAGE_METADATA, UI_DATA_PART_SCHEMAS>): () => ChatStoreOptions<MESSAGE_METADATA, UI_DATA_PART_SCHEMAS>;
+
+export { ActiveResponse, AssistantContent, AssistantModelMessage, CallSettings, CallWarning, Chat, ChatRequestOptions, ChatStatus, ChatStore, ChatStoreEvent, ChatStoreFactory, ChatStoreOptions, ChatTransport, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataContent, DataUIPart, DeepPartial, DefaultChatStoreOptions, DefaultChatTransport, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, ImagePart, InferUIDataParts, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MCPClientError, MCPTransport, MessageConversionError, ModelMessage, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, OriginalUseChatOptions, output as Output, PrepareStepFunction, PrepareStepResult, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RetryError, SerialJobExecutor, SourceUrlUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, SystemModelMessage, TelemetrySettings, TextPart, TextStreamChatTransport, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolModelMessage, ToolResultPart, ToolResultUnion, ToolSet, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIDataPartSchemas, UIDataTypes, UIMessage, UIMessagePart, UIMessageStreamOptions, UIMessageStreamPart, UIMessageStreamWriter, UseChatOptions, UseCompletionOptions, UserContent, UserModelMessage, appendClientMessage, assistantModelMessageSchema, callChatApi, callCompletionApi, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultChatStoreOptions, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractMaxToolInvocationStep, extractReasoningMiddleware, generateObject, generateText, getTextFromDataUrl, getToolInvocations, hasToolCall, isAssistantMessageWithCompletedToolCalls, isDeepEqualData, modelMessageSchema, parsePartialJson, pipeTextStreamToResponse, pipeUIMessageStreamToResponse, shouldResubmitMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, stepCountIs, streamObject, streamText, systemModelMessageSchema, tool, toolModelMessageSchema, updateToolCallResult, userModelMessageSchema, wrapLanguageModel };
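`defaultChatStore` is replaced by `defaultChatStoreOptions`, which returns a `() => ChatStoreOptions` factory matching the new `chatStore` option on `UseChatOptions`. A migration sketch; the `useChat` hook from a framework binding such as `@ai-sdk/react` is an assumption and not part of this package's diff:

```ts
import { defaultChatStoreOptions } from 'ai';

const chatStore = defaultChatStoreOptions({
  api: '/api/chat', // also the documented default
  maxSteps: 5,
});

// Assumption: a framework binding (e.g. @ai-sdk/react) exposes a useChat hook
// that accepts these UseChatOptions. Inside a component:
//   const { messages } = useChat({ chatStore });
// The factory form matches chatStore?: ChatStore<...> | (() => ChatStoreOptions<...>).
```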