ai 5.0.0-beta.11 → 5.0.0-beta.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +17 -0
- package/dist/index.d.mts +1704 -1668
- package/dist/index.d.ts +1704 -1668
- package/dist/index.js +3906 -3884
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +3966 -3944
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +17 -17
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +17 -17
- package/dist/internal/index.mjs.map +1 -1
- package/dist/mcp-stdio/index.js +3 -3
- package/dist/mcp-stdio/index.js.map +1 -1
- package/dist/mcp-stdio/index.mjs +3 -3
- package/dist/mcp-stdio/index.mjs.map +1 -1
- package/dist/test/index.d.mts +40 -2
- package/dist/test/index.d.ts +40 -2
- package/dist/test/index.js +54 -4
- package/dist/test/index.js.map +1 -1
- package/dist/test/index.mjs +51 -4
- package/dist/test/index.mjs.map +1 -1
- package/package.json +2 -2
package/dist/index.d.mts
CHANGED
@@ -1,4 +1,4 @@
-import { ModelMessage, Tool, InferToolInput, InferToolOutput, AssistantModelMessage, ToolModelMessage, ReasoningPart, Schema, SystemModelMessage, UserModelMessage, ProviderOptions, IdGenerator,
+import { ModelMessage, Tool, InferToolInput, InferToolOutput, AssistantModelMessage, ToolModelMessage, ReasoningPart, Schema, SystemModelMessage, UserModelMessage, ProviderOptions, IdGenerator, InferSchema, FlexibleSchema, DataContent, Validator, StandardSchemaV1, ToolCall, Resolvable, FetchFunction } from '@ai-sdk/provider-utils';
export { AssistantContent, AssistantModelMessage, DataContent, FilePart, IdGenerator, ImagePart, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, generateId, jsonSchema, tool } from '@ai-sdk/provider-utils';
import { AttributeValue, Tracer } from '@opentelemetry/api';
import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV2, ImageModelV2CallWarning, ImageModelV2ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2CallWarning, LanguageModelV2Source, SharedV2ProviderMetadata, SpeechModelV2, SpeechModelV2CallWarning, TranscriptionModelV2, TranscriptionModelV2CallWarning, LanguageModelV2Usage, LanguageModelV2CallOptions, AISDKError, LanguageModelV2ToolCall, JSONSchema7, JSONParseError, TypeValidationError, LanguageModelV2Middleware, ProviderV2, NoSuchModelError, JSONObject } from '@ai-sdk/provider';
@@ -717,12 +717,14 @@ type PrepareStepFunction<TOOLS extends Record<string, Tool> = Record<string, Too
steps: Array<StepResult<NoInfer<TOOLS>>>;
stepNumber: number;
model: LanguageModel;
+messages: Array<ModelMessage>;
}) => PromiseLike<PrepareStepResult<TOOLS>> | PrepareStepResult<TOOLS>;
type PrepareStepResult<TOOLS extends Record<string, Tool> = Record<string, Tool>> = {
model?: LanguageModel;
toolChoice?: ToolChoice<NoInfer<TOOLS>>;
activeTools?: Array<keyof NoInfer<TOOLS>>;
system?: string;
+messages?: Array<ModelMessage>;
} | undefined;

type StopCondition<TOOLS extends ToolSet> = (options: {
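Note: `prepareStep` now receives the step's `messages` and may return a replacement `messages` array, which makes per-step history pruning possible. A minimal usage sketch, assuming the `@ai-sdk/openai` provider and an app-provided `conversation: ModelMessage[]` (both illustrative, not part of this diff):

```ts
import { streamText, stepCountIs, type ModelMessage } from 'ai';
import { openai } from '@ai-sdk/openai';

declare const conversation: ModelMessage[]; // assumed app-provided history

const result = streamText({
  model: openai('gpt-4o'),
  messages: conversation,
  stopWhen: stepCountIs(5),
  prepareStep: ({ stepNumber, messages }) => {
    // illustrative policy: keep only the last 10 messages on follow-up steps
    if (stepNumber > 0 && messages.length > 10) {
      return { messages: messages.slice(-10) };
    }
    return undefined; // fall back to the outer-level settings
  },
});
```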
@@ -948,6 +950,236 @@ A function that attempts to repair a tool call that failed to parse.
};
}): Promise<GenerateTextResult<TOOLS, OUTPUT>>;

+/**
+* Detects the first chunk in a buffer.
+*
+* @param buffer - The buffer to detect the first chunk in.
+*
+* @returns The first detected chunk, or `undefined` if no chunk was detected.
+*/
+type ChunkDetector = (buffer: string) => string | undefined | null;
+/**
+* Smooths text streaming output.
+*
+* @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms. Can be set to `null` to skip the delay.
+* @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default), "line" to stream line by line, or provide a custom RegExp pattern for custom chunking.
+*
+* @returns A transform stream that smooths text streaming output.
+*/
+declare function smoothStream<TOOLS extends ToolSet>({ delayInMs, chunking, _internal: { delay }, }?: {
+delayInMs?: number | null;
+chunking?: 'word' | 'line' | RegExp | ChunkDetector;
+/**
+* Internal. For test use only. May change without notice.
+*/
+_internal?: {
+delay?: (delayInMs: number | null) => Promise<void>;
+};
+}): (options: {
+tools: TOOLS;
+}) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
+
+/**
+A transformation that is applied to the stream.
+
+@param stopStream - A function that stops the source stream.
+@param tools - The tools that are accessible to and can be called by the model. The model needs to support calling tools.
+*/
+type StreamTextTransform<TOOLS extends ToolSet> = (options: {
+tools: TOOLS;
+stopStream: () => void;
+}) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
+/**
+Callback that is set using the `onError` option.
+
+@param event - The event that is passed to the callback.
+*/
+type StreamTextOnErrorCallback = (event: {
+error: unknown;
+}) => Promise<void> | void;
+/**
+Callback that is set using the `onStepFinish` option.
+
+@param stepResult - The result of the step.
+*/
+type StreamTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => Promise<void> | void;
+/**
+Callback that is set using the `onChunk` option.
+
+@param event - The event that is passed to the callback.
+*/
+type StreamTextOnChunkCallback<TOOLS extends ToolSet> = (event: {
+chunk: Extract<TextStreamPart<TOOLS>, {
+type: 'text' | 'reasoning' | 'source' | 'tool-call' | 'tool-input-start' | 'tool-input-delta' | 'tool-result' | 'raw';
+}>;
+}) => Promise<void> | void;
+/**
+Callback that is set using the `onFinish` option.
+
+@param event - The event that is passed to the callback.
+*/
+type StreamTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
+/**
+Details for all steps.
+*/
+readonly steps: StepResult<TOOLS>[];
+/**
+Total usage for all steps. This is the sum of the usage of all steps.
+*/
+readonly totalUsage: LanguageModelUsage;
+}) => Promise<void> | void;
+/**
+Generate a text and call tools for a given prompt using a language model.
+
+This function streams the output. If you do not want to stream the output, use `generateText` instead.
+
+@param model - The language model to use.
+@param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
+
+@param system - A system message that will be part of the prompt.
+@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+@param maxOutputTokens - Maximum number of tokens to generate.
+@param temperature - Temperature setting.
+The value is passed through to the provider. The range depends on the provider and model.
+It is recommended to set either `temperature` or `topP`, but not both.
+@param topP - Nucleus sampling.
+The value is passed through to the provider. The range depends on the provider and model.
+It is recommended to set either `temperature` or `topP`, but not both.
+@param topK - Only sample from the top K options for each subsequent token.
+Used to remove "long tail" low probability responses.
+Recommended for advanced use cases only. You usually only need to use temperature.
+@param presencePenalty - Presence penalty setting.
+It affects the likelihood of the model to repeat information that is already in the prompt.
+The value is passed through to the provider. The range depends on the provider and model.
+@param frequencyPenalty - Frequency penalty setting.
+It affects the likelihood of the model to repeatedly use the same words or phrases.
+The value is passed through to the provider. The range depends on the provider and model.
+@param stopSequences - Stop sequences.
+If set, the model will stop generating text when one of the stop sequences is generated.
+@param seed - The seed (integer) to use for random sampling.
+If set and supported by the model, calls will generate deterministic results.
+
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
+
+@param maxSteps - Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
+
+@param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
+@param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
+@param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
+@param onFinish - Callback that is called when the LLM response and all request tool executions
+(for tools that have an `execute` function) are finished.
+
+@return
+A result object for accessing different stream types and additional information.
+*/
+declare function streamText<TOOLS extends ToolSet, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, stopWhen, experimental_output: output, experimental_telemetry: telemetry, prepareStep, providerOptions, experimental_activeTools, activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, includeRawChunks, onChunk, onError, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+/**
+The language model to use.
+*/
+model: LanguageModel;
+/**
+The tools that the model can call. The model needs to support calling tools.
+*/
+tools?: TOOLS;
+/**
+The tool choice strategy. Default: 'auto'.
+*/
+toolChoice?: ToolChoice<TOOLS>;
+/**
+Condition for stopping the generation when there are tool results in the last step.
+When the condition is an array, any of the conditions can be met to stop the generation.
+
+@default stepCountIs(1)
+*/
+stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
+/**
+Optional telemetry configuration (experimental).
+*/
+experimental_telemetry?: TelemetrySettings;
+/**
+Additional provider-specific options. They are passed through
+to the provider from the AI SDK and enable provider-specific
+functionality that can be fully encapsulated in the provider.
+*/
+providerOptions?: ProviderOptions;
+/**
+* @deprecated Use `activeTools` instead.
+*/
+experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
+/**
+Limits the tools that are available for the model to call without
+changing the tool call and result types in the result.
+*/
+activeTools?: Array<keyof NoInfer<TOOLS>>;
+/**
+Optional specification for parsing structured outputs from the LLM response.
+*/
+experimental_output?: Output<OUTPUT, PARTIAL_OUTPUT>;
+/**
+Optional function that you can use to provide different settings for a step.
+
+@param options - The options for the step.
+@param options.steps - The steps that have been executed so far.
+@param options.stepNumber - The number of the step that is being executed.
+@param options.model - The model that is being used.
+
+@returns An object that contains the settings for the step.
+If you return undefined (or for undefined settings), the settings from the outer level will be used.
+*/
+prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
+/**
+A function that attempts to repair a tool call that failed to parse.
+*/
+experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
+/**
+Optional stream transformations.
+They are applied in the order they are provided.
+The stream transformations must maintain the stream structure for streamText to work correctly.
+*/
+experimental_transform?: StreamTextTransform<TOOLS> | Array<StreamTextTransform<TOOLS>>;
+/**
+Whether to include raw chunks from the provider in the stream.
+When enabled, you will receive raw chunks with type 'raw' that contain the unprocessed data from the provider.
+This allows access to cutting-edge provider features not yet wrapped by the AI SDK.
+Defaults to false.
+*/
+includeRawChunks?: boolean;
+/**
+Callback that is called for each chunk of the stream.
+The stream processing will pause until the callback promise is resolved.
+*/
+onChunk?: StreamTextOnChunkCallback<TOOLS>;
+/**
+Callback that is invoked when an error occurs during streaming.
+You can use it to log errors.
+The stream processing will pause until the callback promise is resolved.
+*/
+onError?: StreamTextOnErrorCallback;
+/**
+Callback that is called when the LLM response and all request tool executions
+(for tools that have an `execute` function) are finished.
+
+The usage is the combined usage of all steps.
+*/
+onFinish?: StreamTextOnFinishCallback<TOOLS>;
+/**
+Callback that is called when each step (LLM call) is finished, including intermediate steps.
+*/
+onStepFinish?: StreamTextOnStepFinishCallback<TOOLS>;
+/**
+Internal. For test use only. May change without notice.
+*/
+_internal?: {
+now?: () => number;
+generateId?: IdGenerator;
+currentDate?: () => Date;
+};
+}): StreamTextResult<TOOLS, PARTIAL_OUTPUT>;
+
/**
The data types that can be used in the UI message for the UI message data parts.
*/
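Note: `smoothStream` plugs into `streamText` via `experimental_transform`, as declared above. A minimal sketch, assuming the `@ai-sdk/openai` provider is installed:

```ts
import { streamText, smoothStream } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai('gpt-4o'),
  prompt: 'Write a short story about a robot learning to paint.',
  // emit word-sized chunks with a 10 ms delay between them
  experimental_transform: smoothStream({ delayInMs: 10, chunking: 'word' }),
});

for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}
```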
@@ -956,10 +1188,19 @@ type UITool = {
input: unknown;
output: unknown | undefined;
};
+/**
+* Infer the input and output types of a tool so it can be used as a UI tool.
+*/
type InferUITool<TOOL extends Tool> = {
input: InferToolInput<TOOL>;
output: InferToolOutput<TOOL>;
};
+/**
+* Infer the input and output types of a tool set so it can be used as a UI tool set.
+*/
+type InferUITools<TOOLS extends ToolSet> = {
+[NAME in keyof TOOLS & string]: InferUITool<TOOLS[NAME]>;
+};
type UITools = Record<string, UITool>;
/**
AI SDK UI Messages. They are used in the client and to communicate between the frontend and the API routes.
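Note: `InferUITool` / `InferUITools` derive UI-side input/output types from a server-side tool set, so chat UIs can type tool parts without redeclaring schemas. A sketch, assuming `zod` is installed and that these helpers are exported from `ai` as the typings indicate; the `weather` tool is hypothetical:

```ts
import { tool, type InferUITools } from 'ai';
import { z } from 'zod';

const tools = {
  weather: tool({
    description: 'Get the weather for a city',
    inputSchema: z.object({ city: z.string() }),
    execute: async ({ city }) => ({ city, temperatureC: 21 }),
  }),
};

// { weather: { input: { city: string }; output: { city: string; temperatureC: number } } }
type MyUITools = InferUITools<typeof tools>;
```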
@@ -1083,7 +1324,7 @@ type ToolUIPart<TOOLS extends UITools = UITools> = ValueOf<{
toolCallId: string;
} & ({
state: 'input-streaming';
-input: DeepPartial<TOOLS[NAME]['input']
+input: DeepPartial<TOOLS[NAME]['input']> | undefined;
providerExecuted?: boolean;
output?: never;
errorText?: never;
@@ -1221,6 +1462,13 @@ type UIMessageStreamOptions<UI_MESSAGE extends UIMessage> = {
* and a message ID is provided for the response message.
*/
originalMessages?: UI_MESSAGE[];
+/**
+* Generate a message ID for the response message.
+*
+* If not provided, no message ID will be set for the response message (unless
+* the original messages are provided and the last message is an assistant message).
+*/
+generateMessageId?: IdGenerator;
onFinish?: (options: {
/**
* The updates list of UI messages.
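Note: the new `generateMessageId` option lets UI message streams assign IDs to response messages, which matters when persisting chats. A route-handler sketch, assuming `streamText(...).toUIMessageStreamResponse` accepts these `UIMessageStreamOptions` and that `@ai-sdk/openai` is installed:

```ts
import { convertToModelMessages, createIdGenerator, streamText, type UIMessage } from 'ai';
import { openai } from '@ai-sdk/openai';

export async function POST(req: Request) {
  const { messages }: { messages: UIMessage[] } = await req.json();

  const result = streamText({
    model: openai('gpt-4o'),
    messages: convertToModelMessages(messages),
  });

  return result.toUIMessageStreamResponse({
    originalMessages: messages,
    // e.g. "msg-..." — keeps persisted message IDs stable across reloads
    generateMessageId: createIdGenerator({ prefix: 'msg', size: 16 }),
  });
}
```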
@@ -1625,23 +1873,193 @@ declare class Agent<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = neve
}): StreamTextResult<TOOLS, OUTPUT_PARTIAL>;
}

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+/**
+The result of an `embed` call.
+It contains the embedding, the value, and additional information.
+*/
+interface EmbedResult<VALUE> {
+/**
+The value that was embedded.
+*/
+readonly value: VALUE;
+/**
+The embedding of the value.
+*/
+readonly embedding: Embedding;
+/**
+The embedding token usage.
+*/
+readonly usage: EmbeddingModelUsage;
+/**
+Optional response data.
+*/
+readonly response?: {
+/**
+Response headers.
+*/
+headers?: Record<string, string>;
+/**
+The response body.
+*/
+body?: unknown;
+};
+}
+
+/**
+Embed a value using an embedding model. The type of the value is defined by the embedding model.
+
+@param model - The embedding model to use.
+@param value - The value that should be embedded.
+
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
+
+@returns A result object that contains the embedding, the value, and additional information.
+*/
+declare function embed<VALUE>({ model, value, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
+/**
+The embedding model to use.
+*/
+model: EmbeddingModel<VALUE>;
+/**
+The value that should be embedded.
+*/
+value: VALUE;
+/**
+Maximum number of retries per embedding model call. Set to 0 to disable retries.
+
+@default 2
+*/
+maxRetries?: number;
+/**
+Abort signal.
+*/
+abortSignal?: AbortSignal;
+/**
+Additional headers to include in the request.
+Only applicable for HTTP-based providers.
+*/
+headers?: Record<string, string>;
+/**
+Additional provider-specific options. They are passed through
+to the provider from the AI SDK and enable provider-specific
+functionality that can be fully encapsulated in the provider.
+*/
+providerOptions?: ProviderOptions;
+/**
+* Optional telemetry configuration (experimental).
+*/
+experimental_telemetry?: TelemetrySettings;
+}): Promise<EmbedResult<VALUE>>;
+
+/**
+The result of a `embedMany` call.
+It contains the embeddings, the values, and additional information.
+*/
+interface EmbedManyResult<VALUE> {
+/**
+The values that were embedded.
+*/
+readonly values: Array<VALUE>;
+/**
+The embeddings. They are in the same order as the values.
+*/
+readonly embeddings: Array<Embedding>;
+/**
+The embedding token usage.
+*/
+readonly usage: EmbeddingModelUsage;
+/**
+Optional raw response data.
+*/
+readonly responses?: Array<{
+/**
+Response headers.
+*/
+headers?: Record<string, string>;
+/**
+The response body.
+*/
+body?: unknown;
+} | undefined>;
+}
+
+/**
+Embed several values using an embedding model. The type of the value is defined
+by the embedding model.
+
+`embedMany` automatically splits large requests into smaller chunks if the model
+has a limit on how many embeddings can be generated in a single call.
+
+@param model - The embedding model to use.
+@param values - The values that should be embedded.
+
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
+
+@returns A result object that contains the embeddings, the value, and additional information.
+*/
+declare function embedMany<VALUE>({ model, values, maxParallelCalls, maxRetries: maxRetriesArg, abortSignal, headers, providerOptions, experimental_telemetry: telemetry, }: {
+/**
+The embedding model to use.
+*/
+model: EmbeddingModel<VALUE>;
+/**
+The values that should be embedded.
+*/
+values: Array<VALUE>;
+/**
+Maximum number of retries per embedding model call. Set to 0 to disable retries.
+
+@default 2
+*/
+maxRetries?: number;
+/**
+Abort signal.
+*/
+abortSignal?: AbortSignal;
+/**
+Additional headers to include in the request.
+Only applicable for HTTP-based providers.
+*/
+headers?: Record<string, string>;
+/**
+* Optional telemetry configuration (experimental).
+*/
+experimental_telemetry?: TelemetrySettings;
+/**
+Additional provider-specific options. They are passed through
+to the provider from the AI SDK and enable provider-specific
+functionality that can be fully encapsulated in the provider.
+*/
+providerOptions?: ProviderOptions;
+/**
+* Maximum number of concurrent requests.
+*
+* @default Infinity
+*/
+maxParallelCalls?: number;
+}): Promise<EmbedManyResult<VALUE>>;
+
+declare const symbol$c: unique symbol;
+declare class InvalidArgumentError extends AISDKError {
+private readonly [symbol$c];
+readonly parameter: string;
+readonly value: unknown;
+constructor({ parameter, value, message, }: {
+parameter: string;
+value: unknown;
+message: string;
+});
+static isInstance(error: unknown): error is InvalidArgumentError;
+}
+
+type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
+type: 'text-start';
+providerMetadata?: ProviderMetadata;
+id: string;
} | {
type: 'text-delta';
id: string;
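Note: the `embed`/`embedMany` declarations above map onto the usual call pattern; `maxParallelCalls` caps concurrency when `embedMany` splits a large request into batches. A sketch, assuming the OpenAI provider exposes `textEmbeddingModel`:

```ts
import { cosineSimilarity, embedMany } from 'ai';
import { openai } from '@ai-sdk/openai';

const { embeddings, usage } = await embedMany({
  model: openai.textEmbeddingModel('text-embedding-3-small'),
  values: ['sunny day at the beach', 'rainy afternoon in the city'],
  maxParallelCalls: 2, // limit concurrent requests for large inputs
});

// embeddings are returned in the same order as the values
console.log(cosineSimilarity(embeddings[0], embeddings[1]), usage.tokens);
```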
@@ -1889,480 +2307,299 @@ declare class RetryError extends AISDKError {
|
|
1889
2307
|
static isInstance(error: unknown): error is RetryError;
|
1890
2308
|
}
|
1891
2309
|
|
1892
|
-
|
1893
|
-
|
1894
|
-
|
1895
|
-
|
1896
|
-
|
1897
|
-
response: ServerResponse;
|
1898
|
-
textStream: ReadableStream<string>;
|
1899
|
-
} & ResponseInit): void;
|
1900
|
-
|
1901
|
-
declare const getOriginalFetch: () => typeof fetch;
|
1902
|
-
declare function callCompletionApi({ api, prompt, credentials, headers, body, streamProtocol, setCompletion, setLoading, setError, setAbortController, onFinish, onError, fetch, }: {
|
1903
|
-
api: string;
|
1904
|
-
prompt: string;
|
1905
|
-
credentials: RequestCredentials | undefined;
|
1906
|
-
headers: HeadersInit | undefined;
|
1907
|
-
body: Record<string, any>;
|
1908
|
-
streamProtocol: 'data' | 'text' | undefined;
|
1909
|
-
setCompletion: (completion: string) => void;
|
1910
|
-
setLoading: (loading: boolean) => void;
|
1911
|
-
setError: (error: Error | undefined) => void;
|
1912
|
-
setAbortController: (abortController: AbortController | null) => void;
|
1913
|
-
onFinish: ((prompt: string, completion: string) => void) | undefined;
|
1914
|
-
onError: ((error: Error) => void) | undefined;
|
1915
|
-
fetch: ReturnType<typeof getOriginalFetch> | undefined;
|
1916
|
-
}): Promise<string | null | undefined>;
|
1917
|
-
|
1918
|
-
interface UIMessageStreamWriter<UI_MESSAGE extends UIMessage = UIMessage> {
|
2310
|
+
/**
|
2311
|
+
The result of a `generateImage` call.
|
2312
|
+
It contains the images and additional information.
|
2313
|
+
*/
|
2314
|
+
interface GenerateImageResult {
|
1919
2315
|
/**
|
1920
|
-
|
2316
|
+
The first image that was generated.
|
1921
2317
|
*/
|
1922
|
-
|
2318
|
+
readonly image: GeneratedFile;
|
1923
2319
|
/**
|
1924
|
-
|
2320
|
+
The images that were generated.
|
2321
|
+
*/
|
2322
|
+
readonly images: Array<GeneratedFile>;
|
2323
|
+
/**
|
2324
|
+
Warnings for the call, e.g. unsupported settings.
|
2325
|
+
*/
|
2326
|
+
readonly warnings: Array<ImageGenerationWarning>;
|
2327
|
+
/**
|
2328
|
+
Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
|
1925
2329
|
*/
|
1926
|
-
|
2330
|
+
readonly responses: Array<ImageModelResponseMetadata>;
|
1927
2331
|
/**
|
1928
|
-
*
|
1929
|
-
*
|
1930
|
-
* to prevent duplicated error masking.
|
2332
|
+
* Provider-specific metadata. They are passed through from the provider to the AI SDK and enable provider-specific
|
2333
|
+
* results that can be fully encapsulated in the provider.
|
1931
2334
|
*/
|
1932
|
-
|
2335
|
+
readonly providerMetadata: ImageModelProviderMetadata;
|
1933
2336
|
}
|
1934
2337
|
|
1935
|
-
declare function createUIMessageStream<UI_MESSAGE extends UIMessage>({ execute, onError, originalMessages, onFinish, generateId, }: {
|
1936
|
-
execute: (options: {
|
1937
|
-
writer: UIMessageStreamWriter<UI_MESSAGE>;
|
1938
|
-
}) => Promise<void> | void;
|
1939
|
-
onError?: (error: unknown) => string;
|
1940
|
-
/**
|
1941
|
-
* The original messages. If they are provided, persistence mode is assumed,
|
1942
|
-
* and a message ID is provided for the response message.
|
1943
|
-
*/
|
1944
|
-
originalMessages?: UI_MESSAGE[];
|
1945
|
-
onFinish?: (options: {
|
1946
|
-
/**
|
1947
|
-
* The updates list of UI messages.
|
1948
|
-
*/
|
1949
|
-
messages: UI_MESSAGE[];
|
1950
|
-
/**
|
1951
|
-
* Indicates whether the response message is a continuation of the last original message,
|
1952
|
-
* or if a new message was created.
|
1953
|
-
*/
|
1954
|
-
isContinuation: boolean;
|
1955
|
-
/**
|
1956
|
-
* The message that was sent to the client as a response
|
1957
|
-
* (including the original message if it was extended).
|
1958
|
-
*/
|
1959
|
-
responseMessage: UI_MESSAGE;
|
1960
|
-
}) => void;
|
1961
|
-
generateId?: IdGenerator;
|
1962
|
-
}): ReadableStream<InferUIMessageChunk<UI_MESSAGE>>;
|
1963
|
-
|
1964
|
-
declare function createUIMessageStreamResponse({ status, statusText, headers, stream, consumeSseStream, }: UIMessageStreamResponseInit & {
|
1965
|
-
stream: ReadableStream<UIMessageChunk>;
|
1966
|
-
}): Response;
|
1967
|
-
|
1968
|
-
declare class JsonToSseTransformStream extends TransformStream<unknown, string> {
|
1969
|
-
constructor();
|
1970
|
-
}
|
1971
|
-
|
1972
|
-
declare function pipeUIMessageStreamToResponse({ response, status, statusText, headers, stream, consumeSseStream, }: {
|
1973
|
-
response: ServerResponse;
|
1974
|
-
stream: ReadableStream<UIMessageChunk>;
|
1975
|
-
} & UIMessageStreamResponseInit): void;
|
1976
|
-
|
1977
2338
|
/**
|
1978
|
-
|
1979
|
-
*
|
1980
|
-
* @param options.message - The last assistant message to use as a starting point when the conversation is resumed. Otherwise undefined.
|
1981
|
-
* @param options.stream - The stream of `UIMessageChunk`s to read.
|
1982
|
-
*
|
1983
|
-
* @returns An `AsyncIterableStream` of `UIMessage`s. Each stream part is a different state of the same message
|
1984
|
-
* as it is being completed.
|
1985
|
-
*/
|
1986
|
-
declare function readUIMessageStream<UI_MESSAGE extends UIMessage>({ message, stream, }: {
|
1987
|
-
message?: UI_MESSAGE;
|
1988
|
-
stream: ReadableStream<UIMessageChunk>;
|
1989
|
-
}): AsyncIterableStream<UI_MESSAGE>;
|
1990
|
-
|
1991
|
-
declare const UI_MESSAGE_STREAM_HEADERS: {
|
1992
|
-
'content-type': string;
|
1993
|
-
'cache-control': string;
|
1994
|
-
connection: string;
|
1995
|
-
'x-vercel-ai-ui-message-stream': string;
|
1996
|
-
'x-accel-buffering': string;
|
1997
|
-
};
|
2339
|
+
Generates images using an image model.
|
1998
2340
|
|
1999
|
-
|
2000
|
-
|
2001
|
-
|
2002
|
-
|
2003
|
-
|
2004
|
-
|
2005
|
-
|
2006
|
-
|
2007
|
-
|
2008
|
-
|
2009
|
-
|
2010
|
-
} & ChatRequestOptions) => Promise<ReadableStream<UIMessageChunk> | null>;
|
2011
|
-
}
|
2341
|
+
@param model - The image model to use.
|
2342
|
+
@param prompt - The prompt that should be used to generate the image.
|
2343
|
+
@param n - Number of images to generate. Default: 1.
|
2344
|
+
@param size - Size of the images to generate. Must have the format `{width}x{height}`.
|
2345
|
+
@param aspectRatio - Aspect ratio of the images to generate. Must have the format `{width}:{height}`.
|
2346
|
+
@param seed - Seed for the image generation.
|
2347
|
+
@param providerOptions - Additional provider-specific options that are passed through to the provider
|
2348
|
+
as body parameters.
|
2349
|
+
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
|
2350
|
+
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
2351
|
+
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
2012
2352
|
|
2013
|
-
|
2014
|
-
|
2015
|
-
|
2016
|
-
};
|
2017
|
-
type UIDataPartSchemas = Record<string, Validator<any> | StandardSchemaV1<any>>;
|
2018
|
-
type UIDataTypesToSchemas<T extends UIDataTypes> = {
|
2019
|
-
[K in keyof T]: Validator<T[K]> | StandardSchemaV1<T[K]>;
|
2020
|
-
};
|
2021
|
-
type InferUIDataParts<T extends UIDataPartSchemas> = {
|
2022
|
-
[K in keyof T]: T[K] extends Validator<infer U> ? U : T[K] extends StandardSchemaV1<infer U> ? U : unknown;
|
2023
|
-
};
|
2024
|
-
type ChatRequestOptions = {
|
2353
|
+
@returns A result object that contains the generated images.
|
2354
|
+
*/
|
2355
|
+
declare function generateImage({ model, prompt, n, maxImagesPerCall, size, aspectRatio, seed, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
|
2025
2356
|
/**
|
2026
|
-
|
2357
|
+
The image model to use.
|
2358
|
+
*/
|
2359
|
+
model: ImageModelV2;
|
2360
|
+
/**
|
2361
|
+
The prompt that should be used to generate the image.
|
2027
2362
|
*/
|
2028
|
-
|
2363
|
+
prompt: string;
|
2029
2364
|
/**
|
2030
|
-
|
2365
|
+
Number of images to generate.
|
2031
2366
|
*/
|
2032
|
-
|
2033
|
-
metadata?: unknown;
|
2034
|
-
};
|
2035
|
-
type ChatStatus = 'submitted' | 'streaming' | 'ready' | 'error';
|
2036
|
-
interface ChatState<UI_MESSAGE extends UIMessage> {
|
2037
|
-
status: ChatStatus;
|
2038
|
-
error: Error | undefined;
|
2039
|
-
messages: UI_MESSAGE[];
|
2040
|
-
pushMessage: (message: UI_MESSAGE) => void;
|
2041
|
-
popMessage: () => void;
|
2042
|
-
replaceMessage: (index: number, message: UI_MESSAGE) => void;
|
2043
|
-
snapshot: <T>(thing: T) => T;
|
2044
|
-
}
|
2045
|
-
type ChatOnErrorCallback = (error: Error) => void;
|
2046
|
-
type ChatOnToolCallCallback = ({ toolCall, }: {
|
2047
|
-
toolCall: ToolCall<string, unknown>;
|
2048
|
-
}) => void | Promise<unknown> | unknown;
|
2049
|
-
type ChatOnDataCallback<UI_MESSAGE extends UIMessage> = (dataPart: DataUIPart<InferUIMessageData<UI_MESSAGE>>) => void;
|
2050
|
-
type ChatOnFinishCallback<UI_MESSAGE extends UIMessage> = (options: {
|
2051
|
-
message: UI_MESSAGE;
|
2052
|
-
}) => void;
|
2053
|
-
interface ChatInit<UI_MESSAGE extends UIMessage> {
|
2367
|
+
n?: number;
|
2054
2368
|
/**
|
2055
|
-
|
2056
|
-
* generated.
|
2369
|
+
Number of images to generate.
|
2057
2370
|
*/
|
2058
|
-
|
2059
|
-
messageMetadataSchema?: Validator<InferUIMessageMetadata<UI_MESSAGE>> | StandardSchemaV1<InferUIMessageMetadata<UI_MESSAGE>>;
|
2060
|
-
dataPartSchemas?: UIDataTypesToSchemas<InferUIMessageData<UI_MESSAGE>>;
|
2061
|
-
messages?: UI_MESSAGE[];
|
2371
|
+
maxImagesPerCall?: number;
|
2062
2372
|
/**
|
2063
|
-
|
2064
|
-
* If not provided the default AI SDK `generateId` is used.
|
2373
|
+
Size of the images to generate. Must have the format `{width}x{height}`. If not provided, the default size will be used.
|
2065
2374
|
*/
|
2066
|
-
|
2067
|
-
transport?: ChatTransport<UI_MESSAGE>;
|
2068
|
-
maxSteps?: number;
|
2375
|
+
size?: `${number}x${number}`;
|
2069
2376
|
/**
|
2070
|
-
|
2377
|
+
Aspect ratio of the images to generate. Must have the format `{width}:{height}`. If not provided, the default aspect ratio will be used.
|
2071
2378
|
*/
|
2072
|
-
|
2379
|
+
aspectRatio?: `${number}:${number}`;
|
2073
2380
|
/**
|
2074
|
-
|
2075
|
-
|
2381
|
+
Seed for the image generation. If not provided, the default seed will be used.
|
2382
|
+
*/
|
2383
|
+
seed?: number;
|
2384
|
+
/**
|
2385
|
+
Additional provider-specific options that are passed through to the provider
|
2386
|
+
as body parameters.
|
2076
2387
|
|
2077
|
-
|
2078
|
-
|
2388
|
+
The outer record is keyed by the provider name, and the inner
|
2389
|
+
record is keyed by the provider-specific metadata key.
|
2390
|
+
```ts
|
2391
|
+
{
|
2392
|
+
"openai": {
|
2393
|
+
"style": "vivid"
|
2394
|
+
}
|
2395
|
+
}
|
2396
|
+
```
|
2079
2397
|
*/
|
2080
|
-
|
2398
|
+
providerOptions?: ProviderOptions;
|
2081
2399
|
/**
|
2082
|
-
|
2083
|
-
|
2084
|
-
|
2400
|
+
Maximum number of retries per embedding model call. Set to 0 to disable retries.
|
2401
|
+
|
2402
|
+
@default 2
|
2085
2403
|
*/
|
2086
|
-
|
2404
|
+
maxRetries?: number;
|
2087
2405
|
/**
|
2088
|
-
|
2089
|
-
|
2090
|
-
|
2406
|
+
Abort signal.
|
2407
|
+
*/
|
2408
|
+
abortSignal?: AbortSignal;
|
2409
|
+
/**
|
2410
|
+
Additional headers to include in the request.
|
2411
|
+
Only applicable for HTTP-based providers.
|
2412
|
+
*/
|
2413
|
+
headers?: Record<string, string>;
|
2414
|
+
}): Promise<GenerateImageResult>;
|
2415
|
+
|
2416
|
+
/**
|
2417
|
+
The result of a `generateObject` call.
|
2418
|
+
*/
|
2419
|
+
interface GenerateObjectResult<OBJECT> {
|
2420
|
+
/**
|
2421
|
+
The generated object (typed according to the schema).
|
2422
|
+
*/
|
2423
|
+
readonly object: OBJECT;
|
2424
|
+
/**
|
2425
|
+
The reason why the generation finished.
|
2426
|
+
*/
|
2427
|
+
readonly finishReason: FinishReason;
|
2428
|
+
/**
|
2429
|
+
The token usage of the generated text.
|
2430
|
+
*/
|
2431
|
+
readonly usage: LanguageModelUsage;
|
2432
|
+
/**
|
2433
|
+
Warnings from the model provider (e.g. unsupported settings).
|
2434
|
+
*/
|
2435
|
+
readonly warnings: CallWarning[] | undefined;
|
2436
|
+
/**
|
2437
|
+
Additional request information.
|
2091
2438
|
*/
|
2092
|
-
|
2093
|
-
}
|
2094
|
-
declare abstract class AbstractChat<UI_MESSAGE extends UIMessage> {
|
2095
|
-
readonly id: string;
|
2096
|
-
readonly generateId: IdGenerator;
|
2097
|
-
protected state: ChatState<UI_MESSAGE>;
|
2098
|
-
private messageMetadataSchema;
|
2099
|
-
private dataPartSchemas;
|
2100
|
-
private readonly transport;
|
2101
|
-
private maxSteps;
|
2102
|
-
private onError?;
|
2103
|
-
private onToolCall?;
|
2104
|
-
private onFinish?;
|
2105
|
-
private onData?;
|
2106
|
-
private activeResponse;
|
2107
|
-
private jobExecutor;
|
2108
|
-
constructor({ generateId, id, transport, maxSteps, messageMetadataSchema, dataPartSchemas, state, onError, onToolCall, onFinish, onData, }: Omit<ChatInit<UI_MESSAGE>, 'messages'> & {
|
2109
|
-
state: ChatState<UI_MESSAGE>;
|
2110
|
-
});
|
2439
|
+
readonly request: LanguageModelRequestMetadata;
|
2111
2440
|
/**
|
2112
|
-
|
2113
|
-
*
|
2114
|
-
* - `submitted`: The message has been sent to the API and we're awaiting the start of the response stream.
|
2115
|
-
* - `streaming`: The response is actively streaming in from the API, receiving chunks of data.
|
2116
|
-
* - `ready`: The full response has been received and processed; a new user message can be submitted.
|
2117
|
-
* - `error`: An error occurred during the API request, preventing successful completion.
|
2441
|
+
Additional response information.
|
2118
2442
|
*/
|
2119
|
-
|
2120
|
-
|
2121
|
-
|
2122
|
-
|
2123
|
-
|
2124
|
-
|
2125
|
-
get messages(): UI_MESSAGE[];
|
2126
|
-
get lastMessage(): UI_MESSAGE | undefined;
|
2127
|
-
set messages(messages: UI_MESSAGE[]);
|
2443
|
+
readonly response: LanguageModelResponseMetadata & {
|
2444
|
+
/**
|
2445
|
+
Response body (available only for providers that use HTTP requests).
|
2446
|
+
*/
|
2447
|
+
body?: unknown;
|
2448
|
+
};
|
2128
2449
|
/**
|
2129
|
-
|
2130
|
-
|
2131
|
-
|
2132
|
-
* If a messageId is provided, the message will be replaced.
|
2450
|
+
Additional provider-specific metadata. They are passed through
|
2451
|
+
from the provider to the AI SDK and enable provider-specific
|
2452
|
+
results that can be fully encapsulated in the provider.
|
2133
2453
|
*/
|
2134
|
-
|
2135
|
-
text?: never;
|
2136
|
-
files?: never;
|
2137
|
-
messageId?: string;
|
2138
|
-
}) | {
|
2139
|
-
text: string;
|
2140
|
-
files?: FileList | FileUIPart[];
|
2141
|
-
metadata?: InferUIMessageMetadata<UI_MESSAGE>;
|
2142
|
-
parts?: never;
|
2143
|
-
messageId?: string;
|
2144
|
-
} | {
|
2145
|
-
files: FileList | FileUIPart[];
|
2146
|
-
metadata?: InferUIMessageMetadata<UI_MESSAGE>;
|
2147
|
-
parts?: never;
|
2148
|
-
messageId?: string;
|
2149
|
-
}, options?: ChatRequestOptions) => Promise<void>;
|
2150
|
-
/**
|
2151
|
-
* Regenerate the assistant message with the provided message id.
|
2152
|
-
* If no message id is provided, the last assistant message will be regenerated.
|
2153
|
-
*/
|
2154
|
-
regenerate: ({ messageId, ...options }?: {
|
2155
|
-
messageId?: string;
|
2156
|
-
} & ChatRequestOptions) => Promise<void>;
|
2157
|
-
/**
|
2158
|
-
* Attempt to resume an ongoing streaming response.
|
2159
|
-
*/
|
2160
|
-
resumeStream: (options?: ChatRequestOptions) => Promise<void>;
|
2161
|
-
addToolResult: ({ toolCallId, output, }: {
|
2162
|
-
toolCallId: string;
|
2163
|
-
output: unknown;
|
2164
|
-
}) => Promise<void>;
|
2454
|
+
readonly providerMetadata: ProviderMetadata | undefined;
|
2165
2455
|
/**
|
2166
|
-
|
2167
|
-
|
2168
|
-
|
2169
|
-
|
2456
|
+
Converts the object to a JSON response.
|
2457
|
+
The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
|
2458
|
+
*/
|
2459
|
+
toJsonResponse(init?: ResponseInit): Response;
|
2170
2460
|
}
|
2171
2461
|
|
2172
|
-
declare function convertFileListToFileUIParts(files: FileList | undefined): Promise<Array<FileUIPart>>;
|
2173
|
-
|
2174
2462
|
/**
|
2175
|
-
|
2176
|
-
|
2463
|
+
A function that attempts to repair the raw output of the mode
|
2464
|
+
to enable JSON parsing.
|
2177
2465
|
|
2178
|
-
|
2179
|
-
|
2180
|
-
|
2181
|
-
|
2182
|
-
|
2183
|
-
|
2184
|
-
ignoreIncompleteToolCalls?: boolean;
|
2185
|
-
}): ModelMessage[];
|
2466
|
+
Should return the repaired text or null if the text cannot be repaired.
|
2467
|
+
*/
|
2468
|
+
type RepairTextFunction = (options: {
|
2469
|
+
text: string;
|
2470
|
+
error: JSONParseError | TypeValidationError;
|
2471
|
+
}) => Promise<string | null>;
|
2186
2472
|
/**
|
2187
|
-
|
2188
|
-
*/
|
2189
|
-
declare const convertToCoreMessages: typeof convertToModelMessages;
|
2473
|
+
Generate a structured, typed object for a given prompt and schema using a language model.
|
2190
2474
|
|
2191
|
-
|
2192
|
-
|
2193
|
-
|
2194
|
-
|
2195
|
-
|
2196
|
-
|
2197
|
-
|
2198
|
-
|
2199
|
-
|
2200
|
-
|
2201
|
-
|
2202
|
-
|
2203
|
-
|
2204
|
-
|
2205
|
-
|
2206
|
-
|
2207
|
-
|
2208
|
-
|
2209
|
-
|
2210
|
-
|
2211
|
-
|
2212
|
-
|
2213
|
-
|
2214
|
-
|
2215
|
-
|
2216
|
-
|
2217
|
-
|
2218
|
-
|
2219
|
-
|
2220
|
-
|
2221
|
-
|
2222
|
-
|
2223
|
-
|
2224
|
-
|
2225
|
-
|
2226
|
-
|
2227
|
-
|
2228
|
-
|
2229
|
-
|
2230
|
-
|
2475
|
+
This function does not stream the output. If you want to stream the output, use `streamObject` instead.
|
2476
|
+
|
2477
|
+
@param model - The language model to use.
|
2478
|
+
@param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
|
2479
|
+
|
2480
|
+
@param system - A system message that will be part of the prompt.
|
2481
|
+
@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
|
2482
|
+
@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
|
2483
|
+
|
2484
|
+
@param maxOutputTokens - Maximum number of tokens to generate.
|
2485
|
+
@param temperature - Temperature setting.
|
2486
|
+
The value is passed through to the provider. The range depends on the provider and model.
|
2487
|
+
It is recommended to set either `temperature` or `topP`, but not both.
|
2488
|
+
@param topP - Nucleus sampling.
|
2489
|
+
The value is passed through to the provider. The range depends on the provider and model.
|
2490
|
+
It is recommended to set either `temperature` or `topP`, but not both.
|
2491
|
+
@param topK - Only sample from the top K options for each subsequent token.
|
2492
|
+
Used to remove "long tail" low probability responses.
|
2493
|
+
Recommended for advanced use cases only. You usually only need to use temperature.
|
2494
|
+
@param presencePenalty - Presence penalty setting.
|
2495
|
+
It affects the likelihood of the model to repeat information that is already in the prompt.
|
2496
|
+
The value is passed through to the provider. The range depends on the provider and model.
|
2497
|
+
@param frequencyPenalty - Frequency penalty setting.
|
2498
|
+
It affects the likelihood of the model to repeatedly use the same words or phrases.
|
2499
|
+
The value is passed through to the provider. The range depends on the provider and model.
|
2500
|
+
@param stopSequences - Stop sequences.
|
2501
|
+
If set, the model will stop generating text when one of the stop sequences is generated.
|
2502
|
+
@param seed - The seed (integer) to use for random sampling.
|
2503
|
+
If set and supported by the model, calls will generate deterministic results.
|
2504
|
+
|
2505
|
+
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
|
2506
|
+
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
2507
|
+
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
2508
|
+
|
2509
|
+
@param schema - The schema of the object that the model should generate.
|
2510
|
+
@param schemaName - Optional name of the output that should be generated.
|
2511
|
+
Used by some providers for additional LLM guidance, e.g.
|
2512
|
+
via tool or schema name.
|
2513
|
+
@param schemaDescription - Optional description of the output that should be generated.
|
2514
|
+
Used by some providers for additional LLM guidance, e.g.
|
2515
|
+
via tool or schema description.
|
2516
|
+
|
2517
|
+
@param output - The type of the output.
|
2518
|
+
|
2519
|
+
- 'object': The output is an object.
|
2520
|
+
- 'array': The output is an array.
|
2521
|
+
- 'enum': The output is an enum.
|
2522
|
+
- 'no-schema': The output is not a schema.
|
2523
|
+
|
2524
|
+
@param experimental_repairText - A function that attempts to repair the raw output of the mode
|
2525
|
+
to enable JSON parsing.
|
2526
|
+
|
2527
|
+
@param experimental_telemetry - Optional telemetry configuration (experimental).
|
2528
|
+
|
2529
|
+
@param providerOptions - Additional provider-specific options. They are passed through
|
2530
|
+
to the provider from the AI SDK and enable provider-specific
|
2531
|
+
functionality that can be fully encapsulated in the provider.
|
2532
|
+
|
2533
|
+
@returns
|
2534
|
+
A result object that contains the generated object, the finish reason, the token usage, and additional information.
|
2535
|
+
*/
|
2536
|
+
declare function generateObject<SCHEMA extends z3.Schema | z4$1.ZodType | Schema = z4$1.ZodType<JSONValue$1>, OUTPUT extends 'object' | 'array' | 'enum' | 'no-schema' = InferSchema<SCHEMA> extends string ? 'enum' : 'object', RESULT = OUTPUT extends 'array' ? Array<InferSchema<SCHEMA>> : InferSchema<SCHEMA>>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (OUTPUT extends 'enum' ? {
|
2231
2537
|
/**
|
2232
|
-
|
2233
|
-
|
2234
|
-
|
2235
|
-
|
2236
|
-
|
2538
|
+
The enum values that the model should use.
|
2539
|
+
*/
|
2540
|
+
enum: Array<RESULT>;
|
2541
|
+
mode?: 'json';
|
2542
|
+
output: 'enum';
|
2543
|
+
} : OUTPUT extends 'no-schema' ? {} : {
|
2237
2544
|
/**
|
2238
|
-
|
2239
|
-
|
2240
|
-
|
2545
|
+
The schema of the object that the model should generate.
|
2546
|
+
*/
|
2547
|
+
schema: SCHEMA;
|
2241
2548
|
/**
|
2242
|
-
|
2243
|
-
|
2244
|
-
|
2245
|
-
|
2246
|
-
|
2247
|
-
* body: {
|
2248
|
-
* sessionId: '123',
|
2249
|
-
* }
|
2250
|
-
* })
|
2251
|
-
* ```
|
2252
|
-
*/
|
2253
|
-
body?: object;
|
2549
|
+
Optional name of the output that should be generated.
|
2550
|
+
Used by some providers for additional LLM guidance, e.g.
|
2551
|
+
via tool or schema name.
|
2552
|
+
*/
|
2553
|
+
schemaName?: string;
|
2254
2554
|
/**
|
2255
|
-
|
2256
|
-
|
2257
|
-
|
2258
|
-
|
2555
|
+
Optional description of the output that should be generated.
|
2556
|
+
Used by some providers for additional LLM guidance, e.g.
|
2557
|
+
via tool or schema description.
|
2558
|
+
*/
|
2559
|
+
schemaDescription?: string;
|
2259
2560
|
/**
|
2260
|
-
|
2261
|
-
* to prepare the request body for the chat API. This can be useful for
|
2262
|
-
* customizing the request body based on the messages and data in the chat.
|
2263
|
-
*
|
2264
|
-
* @param id The id of the chat.
|
2265
|
-
* @param messages The current messages in the chat.
|
2266
|
-
* @param requestBody The request body object passed in the chat request.
|
2267
|
-
*/
|
2268
|
-
prepareSendMessagesRequest?: PrepareSendMessagesRequest<UI_MESSAGE>;
|
2269
|
-
prepareReconnectToStreamRequest?: PrepareReconnectToStreamRequest;
|
2270
|
-
};
|
2271
|
-
declare abstract class HttpChatTransport<UI_MESSAGE extends UIMessage> implements ChatTransport<UI_MESSAGE> {
|
2272
|
-
protected api: string;
|
2273
|
-
protected credentials?: RequestCredentials;
|
2274
|
-
protected headers?: Record<string, string> | Headers;
|
2275
|
-
protected body?: object;
|
2276
|
-
protected fetch?: FetchFunction;
|
2277
|
-
protected prepareSendMessagesRequest?: PrepareSendMessagesRequest<UI_MESSAGE>;
|
2278
|
-
protected prepareReconnectToStreamRequest?: PrepareReconnectToStreamRequest;
|
2279
|
-
constructor({ api, credentials, headers, body, fetch, prepareSendMessagesRequest, prepareReconnectToStreamRequest, }: HttpChatTransportInitOptions<UI_MESSAGE>);
|
2280
|
-
sendMessages({ abortSignal, ...options }: Parameters<ChatTransport<UI_MESSAGE>['sendMessages']>[0]): Promise<ReadableStream<UIMessageChunk>>;
|
2281
|
-
reconnectToStream(options: Parameters<ChatTransport<UI_MESSAGE>['reconnectToStream']>[0]): Promise<ReadableStream<UIMessageChunk> | null>;
|
2282
|
-
protected abstract processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
|
2283
|
-
}
|
2561
|
+
The mode to use for object generation.
|
2284
2562
|
|
2285
|
-
|
2286
|
-
constructor(options?: HttpChatTransportInitOptions<UI_MESSAGE>);
|
2287
|
-
protected processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
|
2288
|
-
}
|
2563
|
+
The schema is converted into a JSON schema and used in one of the following ways
|
2289
2564
|
|
2290
|
-
|
2291
|
-
|
2292
|
-
|
2293
|
-
}
|
2565
|
+
- 'auto': The provider will choose the best mode for the model.
|
2566
|
+
- 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
|
2567
|
+
- 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
|
2294
2568
|
|
2295
|
-
|
2569
|
+
Please note that most providers do not support all modes.
|
2570
|
+
|
2571
|
+
Default and recommended: 'auto' (best mode for the model).
|
2572
|
+
*/
|
2573
|
+
mode?: 'auto' | 'json' | 'tool';
|
2574
|
+
}) & {
|
2575
|
+
output?: OUTPUT;
|
2296
2576
|
/**
|
2297
|
-
|
2577
|
+
The language model to use.
|
2298
2578
|
*/
|
2299
|
-
|
2579
|
+
model: LanguageModel;
|
2300
2580
|
/**
|
2301
|
-
|
2302
|
-
|
2303
|
-
body?: object;
|
2304
|
-
};
|
2305
|
-
type UseCompletionOptions = {
|
2306
|
-
/**
|
2307
|
-
* The API endpoint that accepts a `{ prompt: string }` object and returns
|
2308
|
-
* a stream of tokens of the AI completion response. Defaults to `/api/completion`.
|
2309
|
-
*/
|
2310
|
-
api?: string;
|
2311
|
-
/**
|
2312
|
-
* An unique identifier for the chat. If not provided, a random one will be
|
2313
|
-
* generated. When provided, the `useChat` hook with the same `id` will
|
2314
|
-
* have shared states across components.
|
2315
|
-
*/
|
2316
|
-
id?: string;
|
2317
|
-
/**
|
2318
|
-
* Initial prompt input of the completion.
|
2319
|
-
*/
|
2320
|
-
initialInput?: string;
|
2321
|
-
/**
|
2322
|
-
* Initial completion result. Useful to load an existing history.
|
2323
|
-
*/
|
2324
|
-
initialCompletion?: string;
|
2325
|
-
/**
|
2326
|
-
* Callback function to be called when the completion is finished streaming.
|
2327
|
-
*/
|
2328
|
-
onFinish?: (prompt: string, completion: string) => void;
|
2329
|
-
/**
|
2330
|
-
* Callback function to be called when an error is encountered.
|
2581
|
+
A function that attempts to repair the raw output of the mode
|
2582
|
+
to enable JSON parsing.
|
2331
2583
|
*/
|
2332
|
-
|
2584
|
+
experimental_repairText?: RepairTextFunction;
|
2333
2585
|
/**
|
2334
|
-
|
2335
|
-
|
2336
|
-
|
2337
|
-
*/
|
2338
|
-
credentials?: RequestCredentials;
|
2586
|
+
Optional telemetry configuration (experimental).
|
2587
|
+
*/
|
2588
|
+
experimental_telemetry?: TelemetrySettings;
|
2339
2589
|
/**
|
2340
|
-
|
2341
|
-
|
2342
|
-
|
2590
|
+
Additional provider-specific options. They are passed through
|
2591
|
+
to the provider from the AI SDK and enable provider-specific
|
2592
|
+
functionality that can be fully encapsulated in the provider.
|
2593
|
+
*/
|
2594
|
+
providerOptions?: ProviderOptions;
|
2343
2595
|
/**
|
2344
|
-
*
|
2345
|
-
* @example
|
2346
|
-
* Send a `sessionId` to the API along with the prompt.
|
2347
|
-
* ```js
|
2348
|
-
* useChat({
|
2349
|
-
* body: {
|
2350
|
-
* sessionId: '123',
|
2351
|
-
* }
|
2352
|
-
* })
|
2353
|
-
* ```
|
2596
|
+
* Internal. For test use only. May change without notice.
|
2354
2597
|
*/
|
2355
|
-
|
2356
|
-
|
2357
|
-
|
2358
|
-
|
2359
|
-
|
2360
|
-
/**
|
2361
|
-
Custom fetch implementation. You can use it as a middleware to intercept requests,
|
2362
|
-
or to provide a custom fetch implementation for e.g. testing.
|
2363
|
-
*/
|
2364
|
-
fetch?: FetchFunction;
|
2365
|
-
};
|
2598
|
+
_internal?: {
|
2599
|
+
generateId?: () => string;
|
2600
|
+
currentDate?: () => Date;
|
2601
|
+
};
|
2602
|
+
}): Promise<GenerateObjectResult<RESULT>>;
|
2366
2603
|
|
2367
2604
|
/**
|
2368
2605
|
* Calculates the cosine similarity between two vectors. This is a useful metric for
|
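Note: the `generateObject` signature documented in the hunk above is typically driven by a Zod schema; the result resolves to the parsed, schema-typed object. A minimal sketch, assuming `zod` and `@ai-sdk/openai` are installed:

```ts
import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

const { object, usage } = await generateObject({
  model: openai('gpt-4o'),
  schema: z.object({
    recipe: z.object({
      name: z.string(),
      ingredients: z.array(z.string()),
    }),
  }),
  prompt: 'Generate a lasagna recipe.',
});

console.log(object.recipe.name, usage.totalTokens);
```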
@@ -2425,257 +2662,137 @@ declare function simulateReadableStream<T>({ chunks, initialDelayInMs, chunkDela
|
|
2425
2662
|
 }): ReadableStream<T>;
 
 /**
+The result of a `streamObject` call that contains the partial object stream and additional information.
 */
+interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
   /**
+  Warnings from the model provider (e.g. unsupported settings)
   */
+  readonly warnings: Promise<CallWarning[] | undefined>;
   /**
+  The token usage of the generated response. Resolved when the response is finished.
   */
+  readonly usage: Promise<LanguageModelUsage>;
   /**
+  Additional provider-specific metadata. They are passed through
+  from the provider to the AI SDK and enable provider-specific
+  results that can be fully encapsulated in the provider.
   */
+  readonly providerMetadata: Promise<ProviderMetadata | undefined>;
   /**
+  Additional request information from the last step.
   */
+  readonly request: Promise<LanguageModelRequestMetadata>;
   /**
+  Additional response information.
   */
+  readonly response: Promise<LanguageModelResponseMetadata>;
   /**
+  The generated object (typed according to the schema). Resolved when the response is finished.
+  */
+  readonly object: Promise<RESULT>;
   /**
+  Stream of partial objects. It gets more complete as the stream progresses.
+
+  Note that the partial object is not validated.
+  If you want to be certain that the actual content matches your schema, you need to implement your own validation for partial results.
+  */
+  readonly partialObjectStream: AsyncIterableStream<PARTIAL>;
+  /**
+  * Stream over complete array elements. Only available if the output strategy is set to `array`.
   */
+  readonly elementStream: ELEMENT_STREAM;
   /**
+  Text stream of the JSON representation of the generated object. It contains text chunks.
+  When the stream is finished, the object is valid JSON that can be parsed.
   */
+  readonly textStream: AsyncIterableStream<string>;
   /**
+  Stream of different types of events, including partial objects, errors, and finish events.
+  Only errors that stop the stream, such as network errors, are thrown.
+  */
+  readonly fullStream: AsyncIterableStream<ObjectStreamPart<PARTIAL>>;
   /**
+  Writes text delta output to a Node.js response-like object.
+  It sets a `Content-Type` header to `text/plain; charset=utf-8` and
+  writes each text delta as a separate chunk.
+
+  @param response A Node.js response-like object (ServerResponse).
+  @param init Optional headers, status code, and status text.
   */
+  pipeTextStreamToResponse(response: ServerResponse$1, init?: ResponseInit): void;
+  /**
+  Creates a simple text stream response.
+  The response has a `Content-Type` header set to `text/plain; charset=utf-8`.
+  Each text delta is encoded as UTF-8 and sent as a separate chunk.
+  Non-text-delta events are ignored.
+
+  @param init Optional headers, status code, and status text.
   */
+  toTextStreamResponse(init?: ResponseInit): Response;
 }
+type ObjectStreamPart<PARTIAL> = {
+  type: 'object';
+  object: PARTIAL;
+} | {
+  type: 'text-delta';
+  textDelta: string;
+} | {
+  type: 'error';
+  error: unknown;
+} | {
+  type: 'finish';
+  finishReason: FinishReason;
+  usage: LanguageModelUsage;
+  response: LanguageModelResponseMetadata;
+  providerMetadata?: ProviderMetadata;
+};
2538
2750
|
|
2539
2751
|
/**
|
2540
|
-
|
2541
|
-
by the embedding model.
|
2542
|
-
|
2543
|
-
`embedMany` automatically splits large requests into smaller chunks if the model
|
2544
|
-
has a limit on how many embeddings can be generated in a single call.
|
2545
|
-
|
2546
|
-
@param model - The embedding model to use.
|
2547
|
-
@param values - The values that should be embedded.
|
2752
|
+
Callback that is set using the `onError` option.
|
2548
2753
|
|
2549
|
-
@param
|
2550
|
-
|
2551
|
-
|
2754
|
+
@param event - The event that is passed to the callback.
|
2755
|
+
*/
|
2756
|
+
type StreamObjectOnErrorCallback = (event: {
|
2757
|
+
error: unknown;
|
2758
|
+
}) => Promise<void> | void;
|
2759
|
+
/**
|
2760
|
+
Callback that is set using the `onFinish` option.
|
2552
2761
|
|
2553
|
-
@
|
2762
|
+
@param event - The event that is passed to the callback.
|
2554
2763
|
*/
|
2555
|
-
|
2764
|
+
type StreamObjectOnFinishCallback<RESULT> = (event: {
|
2556
2765
|
/**
|
2557
|
-
The
|
2558
|
-
|
2559
|
-
|
2766
|
+
The token usage of the generated response.
|
2767
|
+
*/
|
2768
|
+
usage: LanguageModelUsage;
|
2560
2769
|
/**
|
2561
|
-
The
|
2562
|
-
|
2563
|
-
|
2564
|
-
/**
|
2565
|
-
Maximum number of retries per embedding model call. Set to 0 to disable retries.
|
2566
|
-
|
2567
|
-
@default 2
|
2568
|
-
*/
|
2569
|
-
maxRetries?: number;
|
2770
|
+
The generated object. Can be undefined if the final object does not match the schema.
|
2771
|
+
*/
|
2772
|
+
object: RESULT | undefined;
|
2570
2773
|
/**
|
2571
|
-
|
2572
|
-
|
2573
|
-
|
2774
|
+
Optional error object. This is e.g. a TypeValidationError when the final object does not match the schema.
|
2775
|
+
*/
|
2776
|
+
error: unknown | undefined;
|
2574
2777
|
/**
|
2575
|
-
|
2576
|
-
Only applicable for HTTP-based providers.
|
2778
|
+
Response metadata.
|
2577
2779
|
*/
|
2578
|
-
|
2579
|
-
/**
|
2580
|
-
* Optional telemetry configuration (experimental).
|
2581
|
-
*/
|
2582
|
-
experimental_telemetry?: TelemetrySettings;
|
2583
|
-
/**
|
2584
|
-
Additional provider-specific options. They are passed through
|
2585
|
-
to the provider from the AI SDK and enable provider-specific
|
2586
|
-
functionality that can be fully encapsulated in the provider.
|
2587
|
-
*/
|
2588
|
-
providerOptions?: ProviderOptions;
|
2589
|
-
/**
|
2590
|
-
* Maximum number of concurrent requests.
|
2591
|
-
*
|
2592
|
-
* @default Infinity
|
2593
|
-
*/
|
2594
|
-
maxParallelCalls?: number;
|
2595
|
-
}): Promise<EmbedManyResult<VALUE>>;
|
2596
|
-
|
2597
|
-
/**
|
2598
|
-
* Detects the first chunk in a buffer.
|
2599
|
-
*
|
2600
|
-
* @param buffer - The buffer to detect the first chunk in.
|
2601
|
-
*
|
2602
|
-
* @returns The first detected chunk, or `undefined` if no chunk was detected.
|
2603
|
-
*/
|
2604
|
-
type ChunkDetector = (buffer: string) => string | undefined | null;
|
2605
|
-
/**
|
2606
|
-
* Smooths text streaming output.
|
2607
|
-
*
|
2608
|
-
* @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms. Can be set to `null` to skip the delay.
|
2609
|
-
* @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default), "line" to stream line by line, or provide a custom RegExp pattern for custom chunking.
|
2610
|
-
*
|
2611
|
-
* @returns A transform stream that smooths text streaming output.
|
2612
|
-
*/
|
2613
|
-
declare function smoothStream<TOOLS extends ToolSet>({ delayInMs, chunking, _internal: { delay }, }?: {
|
2614
|
-
delayInMs?: number | null;
|
2615
|
-
chunking?: 'word' | 'line' | RegExp | ChunkDetector;
|
2616
|
-
/**
|
2617
|
-
* Internal. For test use only. May change without notice.
|
2618
|
-
*/
|
2619
|
-
_internal?: {
|
2620
|
-
delay?: (delayInMs: number | null) => Promise<void>;
|
2621
|
-
};
|
2622
|
-
}): (options: {
|
2623
|
-
tools: TOOLS;
|
2624
|
-
}) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
|
2625
|
-
|
2626
|
-
/**
|
2627
|
-
A transformation that is applied to the stream.
|
2628
|
-
|
2629
|
-
@param stopStream - A function that stops the source stream.
|
2630
|
-
@param tools - The tools that are accessible to and can be called by the model. The model needs to support calling tools.
|
2631
|
-
*/
|
2632
|
-
type StreamTextTransform<TOOLS extends ToolSet> = (options: {
|
2633
|
-
tools: TOOLS;
|
2634
|
-
stopStream: () => void;
|
2635
|
-
}) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
|
2636
|
-
/**
|
2637
|
-
Callback that is set using the `onError` option.
|
2638
|
-
|
2639
|
-
@param event - The event that is passed to the callback.
|
2640
|
-
*/
|
2641
|
-
type StreamTextOnErrorCallback = (event: {
|
2642
|
-
error: unknown;
|
2643
|
-
}) => Promise<void> | void;
|
2644
|
-
/**
|
2645
|
-
Callback that is set using the `onStepFinish` option.
|
2646
|
-
|
2647
|
-
@param stepResult - The result of the step.
|
2648
|
-
*/
|
2649
|
-
type StreamTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => Promise<void> | void;
|
2650
|
-
/**
|
2651
|
-
Callback that is set using the `onChunk` option.
|
2652
|
-
|
2653
|
-
@param event - The event that is passed to the callback.
|
2654
|
-
*/
|
2655
|
-
type StreamTextOnChunkCallback<TOOLS extends ToolSet> = (event: {
|
2656
|
-
chunk: Extract<TextStreamPart<TOOLS>, {
|
2657
|
-
type: 'text' | 'reasoning' | 'source' | 'tool-call' | 'tool-input-start' | 'tool-input-delta' | 'tool-result' | 'raw';
|
2658
|
-
}>;
|
2659
|
-
}) => Promise<void> | void;
|
2660
|
-
/**
|
2661
|
-
Callback that is set using the `onFinish` option.
|
2662
|
-
|
2663
|
-
@param event - The event that is passed to the callback.
|
2664
|
-
*/
|
2665
|
-
type StreamTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
|
2780
|
+
response: LanguageModelResponseMetadata;
|
2666
2781
|
/**
|
2667
|
-
|
2668
|
-
|
2669
|
-
|
2782
|
+
Warnings from the model provider (e.g. unsupported settings).
|
2783
|
+
*/
|
2784
|
+
warnings?: CallWarning[];
|
2670
2785
|
/**
|
2671
|
-
|
2672
|
-
|
2673
|
-
|
2786
|
+
Additional provider-specific metadata. They are passed through
|
2787
|
+
to the provider from the AI SDK and enable provider-specific
|
2788
|
+
functionality that can be fully encapsulated in the provider.
|
2789
|
+
*/
|
2790
|
+
providerMetadata: ProviderMetadata | undefined;
|
2674
2791
|
}) => Promise<void> | void;
|
2675
2792
|
/**
|
2676
|
-
Generate a
|
2793
|
+
Generate a structured, typed object for a given prompt and schema using a language model.
|
2677
2794
|
|
2678
|
-
This function streams the output. If you do not want to stream the output, use `
|
2795
|
+
This function streams the output. If you do not want to stream the output, use `generateObject` instead.
|
2679
2796
|
|
2680
2797
|
@param model - The language model to use.
|
2681
2798
|
@param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
|
@@ -2709,37 +2826,74 @@ If set and supported by the model, calls will generate deterministic results.
|
|
2709
2826
|
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
2710
2827
|
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
2711
2828
|
|
2712
|
-
@param
|
2829
|
+
@param schema - The schema of the object that the model should generate.
|
2830
|
+
@param schemaName - Optional name of the output that should be generated.
|
2831
|
+
Used by some providers for additional LLM guidance, e.g.
|
2832
|
+
via tool or schema name.
|
2833
|
+
@param schemaDescription - Optional description of the output that should be generated.
|
2834
|
+
Used by some providers for additional LLM guidance, e.g.
|
2835
|
+
via tool or schema description.
|
2713
2836
|
|
2714
|
-
@param
|
2715
|
-
@param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
|
2716
|
-
@param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
|
2717
|
-
@param onFinish - Callback that is called when the LLM response and all request tool executions
|
2718
|
-
(for tools that have an `execute` function) are finished.
|
2837
|
+
@param output - The type of the output.
|
2719
2838
|
|
2720
|
-
|
2721
|
-
|
2839
|
+
- 'object': The output is an object.
|
2840
|
+
- 'array': The output is an array.
|
2841
|
+
- 'enum': The output is an enum.
|
2842
|
+
- 'no-schema': The output is not a schema.
|
2843
|
+
|
2844
|
+
@param experimental_telemetry - Optional telemetry configuration (experimental).
|
2845
|
+
|
2846
|
+
@param providerOptions - Additional provider-specific options. They are passed through
|
2847
|
+
to the provider from the AI SDK and enable provider-specific
|
2848
|
+
functionality that can be fully encapsulated in the provider.
|
2849
|
+
|
2850
|
+
@returns
|
2851
|
+
A result object for accessing the partial object stream and additional information.
|
2722
2852
|
*/
|
2723
|
-
declare function
|
2853
|
+
declare function streamObject<SCHEMA extends z3.Schema | z4$1.ZodType | Schema = z4$1.ZodType<JSONValue$1>, OUTPUT extends 'object' | 'array' | 'enum' | 'no-schema' = InferSchema<SCHEMA> extends string ? 'enum' : 'object', RESULT = OUTPUT extends 'array' ? Array<InferSchema<SCHEMA>> : InferSchema<SCHEMA>>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (OUTPUT extends 'enum' ? {
|
2724
2854
|
/**
|
2725
|
-
The
|
2726
|
-
|
2727
|
-
|
2855
|
+
The enum values that the model should use.
|
2856
|
+
*/
|
2857
|
+
enum: Array<RESULT>;
|
2858
|
+
mode?: 'json';
|
2859
|
+
output: 'enum';
|
2860
|
+
} : OUTPUT extends 'no-schema' ? {} : {
|
2728
2861
|
/**
|
2729
|
-
The
|
2730
|
-
|
2731
|
-
|
2862
|
+
The schema of the object that the model should generate.
|
2863
|
+
*/
|
2864
|
+
schema: SCHEMA;
|
2732
2865
|
/**
|
2733
|
-
|
2734
|
-
|
2735
|
-
|
2866
|
+
Optional name of the output that should be generated.
|
2867
|
+
Used by some providers for additional LLM guidance, e.g.
|
2868
|
+
via tool or schema name.
|
2869
|
+
*/
|
2870
|
+
schemaName?: string;
|
2736
2871
|
/**
|
2737
|
-
|
2738
|
-
|
2872
|
+
Optional description of the output that should be generated.
|
2873
|
+
Used by some providers for additional LLM guidance, e.g.
|
2874
|
+
via tool or schema description.
|
2875
|
+
*/
|
2876
|
+
schemaDescription?: string;
|
2877
|
+
/**
|
2878
|
+
The mode to use for object generation.
|
2739
2879
|
|
2740
|
-
|
2741
|
-
|
2742
|
-
|
2880
|
+
The schema is converted into a JSON schema and used in one of the following ways
|
2881
|
+
|
2882
|
+
- 'auto': The provider will choose the best mode for the model.
|
2883
|
+
- 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
|
2884
|
+
- 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
|
2885
|
+
|
2886
|
+
Please note that most providers do not support all modes.
|
2887
|
+
|
2888
|
+
Default and recommended: 'auto' (best mode for the model).
|
2889
|
+
*/
|
2890
|
+
mode?: 'auto' | 'json' | 'tool';
|
2891
|
+
}) & {
|
2892
|
+
output?: OUTPUT;
|
2893
|
+
/**
|
2894
|
+
The language model to use.
|
2895
|
+
*/
|
2896
|
+
model: LanguageModel;
|
2743
2897
|
/**
|
2744
2898
|
Optional telemetry configuration (experimental).
|
2745
2899
|
*/
|
@@ -2748,156 +2902,108 @@ Optional telemetry configuration (experimental).
|
|
2748
2902
|
Additional provider-specific options. They are passed through
|
2749
2903
|
to the provider from the AI SDK and enable provider-specific
|
2750
2904
|
functionality that can be fully encapsulated in the provider.
|
2751
|
-
|
2905
|
+
*/
|
2752
2906
|
providerOptions?: ProviderOptions;
|
2753
2907
|
/**
|
2754
|
-
|
2755
|
-
|
2756
|
-
|
2908
|
+
Callback that is invoked when an error occurs during streaming.
|
2909
|
+
You can use it to log errors.
|
2910
|
+
The stream processing will pause until the callback promise is resolved.
|
2911
|
+
*/
|
2912
|
+
onError?: StreamObjectOnErrorCallback;
|
2757
2913
|
/**
|
2758
|
-
|
2759
|
-
|
2760
|
-
|
2761
|
-
activeTools?: Array<keyof NoInfer<TOOLS>>;
|
2914
|
+
Callback that is called when the LLM response and the final object validation are finished.
|
2915
|
+
*/
|
2916
|
+
onFinish?: StreamObjectOnFinishCallback<RESULT>;
|
2762
2917
|
/**
|
2763
|
-
|
2918
|
+
* Internal. For test use only. May change without notice.
|
2764
2919
|
*/
|
2765
|
-
|
2766
|
-
|
2767
|
-
|
2768
|
-
|
2769
|
-
|
2770
|
-
|
2771
|
-
@param options.stepNumber - The number of the step that is being executed.
|
2772
|
-
@param options.model - The model that is being used.
|
2920
|
+
_internal?: {
|
2921
|
+
generateId?: () => string;
|
2922
|
+
currentDate?: () => Date;
|
2923
|
+
now?: () => number;
|
2924
|
+
};
|
2925
|
+
}): StreamObjectResult<OUTPUT extends 'enum' ? string : OUTPUT extends 'array' ? RESULT : DeepPartial<RESULT>, OUTPUT extends 'array' ? RESULT : RESULT, OUTPUT extends 'array' ? RESULT extends Array<infer U> ? AsyncIterableStream<U> : never : never>;
|
2773
2926
|
|
2774
|
-
|
2775
|
-
|
2776
|
-
|
2777
|
-
|
2927
|
+
/**
|
2928
|
+
* A generated audio file.
|
2929
|
+
*/
|
2930
|
+
interface GeneratedAudioFile extends GeneratedFile {
|
2778
2931
|
/**
|
2779
|
-
|
2932
|
+
* Audio format of the file (e.g., 'mp3', 'wav', etc.)
|
2780
2933
|
*/
|
2781
|
-
|
2782
|
-
|
2783
|
-
Optional stream transformations.
|
2784
|
-
They are applied in the order they are provided.
|
2785
|
-
The stream transformations must maintain the stream structure for streamText to work correctly.
|
2786
|
-
*/
|
2787
|
-
experimental_transform?: StreamTextTransform<TOOLS> | Array<StreamTextTransform<TOOLS>>;
|
2788
|
-
/**
|
2789
|
-
Whether to include raw chunks from the provider in the stream.
|
2790
|
-
When enabled, you will receive raw chunks with type 'raw' that contain the unprocessed data from the provider.
|
2791
|
-
This allows access to cutting-edge provider features not yet wrapped by the AI SDK.
|
2792
|
-
Defaults to false.
|
2793
|
-
*/
|
2794
|
-
includeRawChunks?: boolean;
|
2795
|
-
/**
|
2796
|
-
Callback that is called for each chunk of the stream.
|
2797
|
-
The stream processing will pause until the callback promise is resolved.
|
2798
|
-
*/
|
2799
|
-
onChunk?: StreamTextOnChunkCallback<TOOLS>;
|
2800
|
-
/**
|
2801
|
-
Callback that is invoked when an error occurs during streaming.
|
2802
|
-
You can use it to log errors.
|
2803
|
-
The stream processing will pause until the callback promise is resolved.
|
2804
|
-
*/
|
2805
|
-
onError?: StreamTextOnErrorCallback;
|
2806
|
-
/**
|
2807
|
-
Callback that is called when the LLM response and all request tool executions
|
2808
|
-
(for tools that have an `execute` function) are finished.
|
2809
|
-
|
2810
|
-
The usage is the combined usage of all steps.
|
2811
|
-
*/
|
2812
|
-
onFinish?: StreamTextOnFinishCallback<TOOLS>;
|
2813
|
-
/**
|
2814
|
-
Callback that is called when each step (LLM call) is finished, including intermediate steps.
|
2815
|
-
*/
|
2816
|
-
onStepFinish?: StreamTextOnStepFinishCallback<TOOLS>;
|
2817
|
-
/**
|
2818
|
-
Internal. For test use only. May change without notice.
|
2819
|
-
*/
|
2820
|
-
_internal?: {
|
2821
|
-
now?: () => number;
|
2822
|
-
generateId?: IdGenerator;
|
2823
|
-
currentDate?: () => Date;
|
2824
|
-
};
|
2825
|
-
}): StreamTextResult<TOOLS, PARTIAL_OUTPUT>;
|
2934
|
+
readonly format: string;
|
2935
|
+
}
|
2826
2936
|
|
2827
2937
|
/**
|
2828
|
-
The result of a `
|
2829
|
-
It contains the
|
2938
|
+
The result of a `generateSpeech` call.
|
2939
|
+
It contains the audio data and additional information.
|
2830
2940
|
*/
|
2831
|
-
interface
|
2941
|
+
interface SpeechResult {
|
2832
2942
|
/**
|
2833
|
-
|
2943
|
+
* The audio data as a base64 encoded string or binary data.
|
2834
2944
|
*/
|
2835
|
-
readonly
|
2836
|
-
/**
|
2837
|
-
The images that were generated.
|
2838
|
-
*/
|
2839
|
-
readonly images: Array<GeneratedFile>;
|
2945
|
+
readonly audio: GeneratedAudioFile;
|
2840
2946
|
/**
|
2841
|
-
|
2947
|
+
Warnings for the call, e.g. unsupported settings.
|
2842
2948
|
*/
|
2843
|
-
readonly warnings: Array<
|
2949
|
+
readonly warnings: Array<SpeechWarning>;
|
2844
2950
|
/**
|
2845
|
-
|
2951
|
+
Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
|
2846
2952
|
*/
|
2847
|
-
readonly responses: Array<
|
2953
|
+
readonly responses: Array<SpeechModelResponseMetadata>;
|
2848
2954
|
/**
|
2849
|
-
|
2850
|
-
* results that can be fully encapsulated in the provider.
|
2955
|
+
Provider metadata from the provider.
|
2851
2956
|
*/
|
2852
|
-
readonly providerMetadata:
|
2957
|
+
readonly providerMetadata: Record<string, Record<string, JSONValue$1>>;
|
2853
2958
|
}
|
2854
2959
|
|
2855
2960
|
/**
|
2856
|
-
Generates
|
2961
|
+
Generates speech audio using a speech model.
|
2857
2962
|
|
2858
|
-
@param model - The
|
2859
|
-
@param
|
2860
|
-
@param
|
2861
|
-
@param
|
2862
|
-
@param
|
2863
|
-
@param
|
2963
|
+
@param model - The speech model to use.
|
2964
|
+
@param text - The text to convert to speech.
|
2965
|
+
@param voice - The voice to use for speech generation.
|
2966
|
+
@param outputFormat - The output format to use for speech generation e.g. "mp3", "wav", etc.
|
2967
|
+
@param instructions - Instructions for the speech generation e.g. "Speak in a slow and steady tone".
|
2968
|
+
@param speed - The speed of the speech generation.
|
2864
2969
|
@param providerOptions - Additional provider-specific options that are passed through to the provider
|
2865
2970
|
as body parameters.
|
2866
2971
|
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
|
2867
2972
|
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
2868
2973
|
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
2869
2974
|
|
2870
|
-
@returns A result object that contains the generated
|
2975
|
+
@returns A result object that contains the generated audio data.
|
2871
2976
|
*/
|
2872
|
-
declare function
|
2977
|
+
declare function generateSpeech({ model, text, voice, outputFormat, instructions, speed, language, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
|
2873
2978
|
/**
|
2874
|
-
The
|
2979
|
+
The speech model to use.
|
2875
2980
|
*/
|
2876
|
-
model:
|
2981
|
+
model: SpeechModelV2;
|
2877
2982
|
/**
|
2878
|
-
The
|
2983
|
+
The text to convert to speech.
|
2879
2984
|
*/
|
2880
|
-
|
2985
|
+
text: string;
|
2881
2986
|
/**
|
2882
|
-
|
2987
|
+
The voice to use for speech generation.
|
2883
2988
|
*/
|
2884
|
-
|
2989
|
+
voice?: string;
|
2885
2990
|
/**
|
2886
|
-
|
2991
|
+
* The desired output format for the audio e.g. "mp3", "wav", etc.
|
2887
2992
|
*/
|
2888
|
-
|
2993
|
+
outputFormat?: 'mp3' | 'wav' | (string & {});
|
2889
2994
|
/**
|
2890
|
-
|
2891
|
-
|
2892
|
-
|
2995
|
+
Instructions for the speech generation e.g. "Speak in a slow and steady tone".
|
2996
|
+
*/
|
2997
|
+
instructions?: string;
|
2893
2998
|
/**
|
2894
|
-
|
2999
|
+
The speed of the speech generation.
|
2895
3000
|
*/
|
2896
|
-
|
3001
|
+
speed?: number;
|
2897
3002
|
/**
|
2898
|
-
|
3003
|
+
The language for speech generation. This should be an ISO 639-1 language code (e.g. "en", "es", "fr")
|
3004
|
+
or "auto" for automatic language detection. Provider support varies.
|
2899
3005
|
*/
|
2900
|
-
|
3006
|
+
language?: string;
|
2901
3007
|
/**
|
2902
3008
|
Additional provider-specific options that are passed through to the provider
|
2903
3009
|
as body parameters.
|
@@ -2906,15 +3012,13 @@ declare function generateImage({ model, prompt, n, maxImagesPerCall, size, aspec
|
|
2906
3012
|
record is keyed by the provider-specific metadata key.
|
2907
3013
|
```ts
|
2908
3014
|
{
|
2909
|
-
"openai": {
|
2910
|
-
"style": "vivid"
|
2911
|
-
}
|
3015
|
+
"openai": {}
|
2912
3016
|
}
|
2913
3017
|
```
|
2914
3018
|
*/
|
2915
3019
|
providerOptions?: ProviderOptions;
|
2916
3020
|
/**
|
2917
|
-
Maximum number of retries per
|
3021
|
+
Maximum number of retries per speech model call. Set to 0 to disable retries.
|
2918
3022
|
|
2919
3023
|
@default 2
|
2920
3024
|
*/
|
@@ -2928,1009 +3032,941 @@ declare function generateImage({ model, prompt, n, maxImagesPerCall, size, aspec
|
|
2928
3032
|
Only applicable for HTTP-based providers.
|
2929
3033
|
*/
|
2930
3034
|
headers?: Record<string, string>;
|
2931
|
-
}): Promise<
|
3035
|
+
}): Promise<SpeechResult>;
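A usage sketch for the speech API above; the provider model and the `experimental_generateSpeech` export alias are assumptions about surrounding packages, not shown in this diff:

```ts
// Sketch only: provider model and export alias are assumptions.
import { experimental_generateSpeech as generateSpeech } from 'ai';
import { openai } from '@ai-sdk/openai';

const { audio, warnings } = await generateSpeech({
  model: openai.speech('tts-1'),
  text: 'Hello from the AI SDK.',
  voice: 'alloy',
  outputFormat: 'mp3',
});

// GeneratedAudioFile carries the bytes plus the audio format ('mp3' here).
console.log(audio.format, audio.uint8Array.byteLength, warnings);
```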
|
2932
3036
|
|
2933
3037
|
/**
|
2934
|
-
|
3038
|
+
* Applies default settings for a language model.
|
2935
3039
|
*/
|
2936
|
-
|
2937
|
-
|
2938
|
-
|
2939
|
-
|
2940
|
-
|
2941
|
-
|
2942
|
-
|
2943
|
-
|
2944
|
-
|
2945
|
-
|
2946
|
-
|
2947
|
-
|
2948
|
-
|
2949
|
-
|
2950
|
-
|
2951
|
-
|
2952
|
-
|
2953
|
-
/**
|
2954
|
-
Additional request information.
|
2955
|
-
*/
|
2956
|
-
readonly request: LanguageModelRequestMetadata;
|
2957
|
-
/**
|
2958
|
-
Additional response information.
|
2959
|
-
*/
|
2960
|
-
readonly response: LanguageModelResponseMetadata & {
|
2961
|
-
/**
|
2962
|
-
Response body (available only for providers that use HTTP requests).
|
2963
|
-
*/
|
2964
|
-
body?: unknown;
|
2965
|
-
};
|
2966
|
-
/**
|
2967
|
-
Additional provider-specific metadata. They are passed through
|
2968
|
-
from the provider to the AI SDK and enable provider-specific
|
2969
|
-
results that can be fully encapsulated in the provider.
|
2970
|
-
*/
|
2971
|
-
readonly providerMetadata: ProviderMetadata | undefined;
|
2972
|
-
/**
|
2973
|
-
Converts the object to a JSON response.
|
2974
|
-
The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
|
2975
|
-
*/
|
2976
|
-
toJsonResponse(init?: ResponseInit): Response;
|
2977
|
-
}
|
3040
|
+
+declare function defaultSettingsMiddleware({ settings, }: {
+  settings: Partial<{
+    maxOutputTokens?: LanguageModelV2CallOptions['maxOutputTokens'];
+    temperature?: LanguageModelV2CallOptions['temperature'];
+    stopSequences?: LanguageModelV2CallOptions['stopSequences'];
+    topP?: LanguageModelV2CallOptions['topP'];
+    topK?: LanguageModelV2CallOptions['topK'];
+    presencePenalty?: LanguageModelV2CallOptions['presencePenalty'];
+    frequencyPenalty?: LanguageModelV2CallOptions['frequencyPenalty'];
+    responseFormat?: LanguageModelV2CallOptions['responseFormat'];
+    seed?: LanguageModelV2CallOptions['seed'];
+    tools?: LanguageModelV2CallOptions['tools'];
+    toolChoice?: LanguageModelV2CallOptions['toolChoice'];
+    headers?: LanguageModelV2CallOptions['headers'];
+    providerOptions?: LanguageModelV2CallOptions['providerOptions'];
+  }>;
+}): LanguageModelV2Middleware;
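A sketch of applying these defaults through `wrapLanguageModel` (declared below in this file); the provider and model id are assumptions:

```ts
// Sketch only: the provider import and model id are assumptions.
import { wrapLanguageModel, defaultSettingsMiddleware, generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const modelWithDefaults = wrapLanguageModel({
  model: openai('gpt-4o-mini'),
  middleware: defaultSettingsMiddleware({
    settings: { temperature: 0.2, maxOutputTokens: 512 },
  }),
});

// Calls through the wrapped model pick up the defaults unless overridden per call.
const { text } = await generateText({
  model: modelWithDefaults,
  prompt: 'Summarize this diff in one sentence.',
});
```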
|
2978
3057
|
|
2979
3058
|
/**
|
2980
|
-
|
2981
|
-
|
3059
|
+
+ * Extract an XML-tagged reasoning section from the generated text and exposes it
+ * as a `reasoning` property on the result.
+ *
+ * @param tagName - The name of the XML tag to extract reasoning from.
+ * @param separator - The separator to use between reasoning and text sections.
+ * @param startWithReasoning - Whether to start with reasoning tokens.
+ */
+declare function extractReasoningMiddleware({ tagName, separator, startWithReasoning, }: {
+  tagName: string;
+  separator?: string;
+  startWithReasoning?: boolean;
+}): LanguageModelV2Middleware;
 
-Should return the repaired text or null if the text cannot be repaired.
-*/
-type RepairTextFunction = (options: {
-  text: string;
-  error: JSONParseError | TypeValidationError;
-}) => Promise<string | null>;
 /**
+ * Simulates streaming chunks with the response from a generate call.
+ */
+declare function simulateStreamingMiddleware(): LanguageModelV2Middleware;
2996
3076
|
|
2997
|
-
|
2998
|
-
|
2999
|
-
|
3077
|
+
/**
|
3078
|
+
+ * Wraps a LanguageModelV2 instance with middleware functionality.
+ * This function allows you to apply middleware to transform parameters,
+ * wrap generate operations, and wrap stream operations of a language model.
+ *
+ * @param options - Configuration options for wrapping the language model.
+ * @param options.model - The original LanguageModelV2 instance to be wrapped.
+ * @param options.middleware - The middleware to be applied to the language model. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.
+ * @param options.modelId - Optional custom model ID to override the original model's ID.
+ * @param options.providerId - Optional custom provider ID to override the original model's provider.
+ * @returns A new LanguageModelV2 instance with middleware applied.
+ */
+declare const wrapLanguageModel: ({ model, middleware: middlewareArg, modelId, providerId, }: {
+  model: LanguageModelV2;
+  middleware: LanguageModelV2Middleware | LanguageModelV2Middleware[];
+  modelId?: string;
+  providerId?: string;
+}) => LanguageModelV2;
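A sketch that wraps a model with `extractReasoningMiddleware` via `wrapLanguageModel`; the provider, model id, and the `<think>` tag name are assumptions:

```ts
// Sketch only: provider, model id, and tag name are assumptions.
import { wrapLanguageModel, extractReasoningMiddleware, generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const reasoningModel = wrapLanguageModel({
  model: openai('gpt-4o-mini'),
  // Pulls <think>...</think> sections out of the generated text so they are
  // surfaced as reasoning rather than mixed into the visible answer.
  middleware: extractReasoningMiddleware({ tagName: 'think' }),
});

const result = await generateText({
  model: reasoningModel,
  prompt: 'Solve 24 * 17 and show your working.',
});
// result.text contains the answer; the extracted reasoning is exposed
// separately on the result's reasoning parts.
console.log(result.text);
```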
|
3000
3095
|
|
3001
|
-
|
3002
|
-
|
3003
|
-
|
3004
|
-
|
3005
|
-
@param
|
3006
|
-
|
3007
|
-
|
3008
|
-
@param
|
3009
|
-
|
3010
|
-
|
3011
|
-
@
|
3012
|
-
|
3013
|
-
|
3014
|
-
|
3015
|
-
|
3016
|
-
|
3017
|
-
|
3018
|
-
|
3019
|
-
|
3020
|
-
|
3021
|
-
|
3022
|
-
|
3023
|
-
|
3024
|
-
|
3025
|
-
|
3026
|
-
|
3027
|
-
|
3028
|
-
|
3029
|
-
|
3030
|
-
|
3031
|
-
|
3032
|
-
|
3096
|
+
/**
|
3097
|
+
+ * Creates a custom provider with specified language models, text embedding models, image models, transcription models, speech models, and an optional fallback provider.
+ *
+ * @param {Object} options - The options for creating the custom provider.
+ * @param {Record<string, LanguageModel>} [options.languageModels] - A record of language models, where keys are model IDs and values are LanguageModel instances.
+ * @param {Record<string, EmbeddingModel<string>>} [options.textEmbeddingModels] - A record of text embedding models, where keys are model IDs and values are EmbeddingModel<string> instances.
+ * @param {Record<string, ImageModel>} [options.imageModels] - A record of image models, where keys are model IDs and values are ImageModel instances.
+ * @param {Record<string, TranscriptionModel>} [options.transcriptionModels] - A record of transcription models, where keys are model IDs and values are TranscriptionModel instances.
+ * @param {Record<string, SpeechModel>} [options.speechModels] - A record of speech models, where keys are model IDs and values are SpeechModel instances.
+ * @param {Provider} [options.fallbackProvider] - An optional fallback provider to use when a requested model is not found in the custom provider.
+ * @returns {Provider} A Provider object with languageModel, textEmbeddingModel, imageModel, transcriptionModel, and speechModel methods.
+ *
+ * @throws {NoSuchModelError} Throws when a requested model is not found and no fallback provider is available.
+ */
+declare function customProvider<LANGUAGE_MODELS extends Record<string, LanguageModelV2>, EMBEDDING_MODELS extends Record<string, EmbeddingModelV2<string>>, IMAGE_MODELS extends Record<string, ImageModelV2>, TRANSCRIPTION_MODELS extends Record<string, TranscriptionModelV2>, SPEECH_MODELS extends Record<string, SpeechModelV2>>({ languageModels, textEmbeddingModels, imageModels, transcriptionModels, speechModels, fallbackProvider, }: {
+  languageModels?: LANGUAGE_MODELS;
+  textEmbeddingModels?: EMBEDDING_MODELS;
+  imageModels?: IMAGE_MODELS;
+  transcriptionModels?: TRANSCRIPTION_MODELS;
+  speechModels?: SPEECH_MODELS;
+  fallbackProvider?: ProviderV2;
+}): ProviderV2 & {
+  languageModel(modelId: ExtractModelId<LANGUAGE_MODELS>): LanguageModelV2;
+  textEmbeddingModel(modelId: ExtractModelId<EMBEDDING_MODELS>): EmbeddingModelV2<string>;
+  imageModel(modelId: ExtractModelId<IMAGE_MODELS>): ImageModelV2;
+  transcriptionModel(modelId: ExtractModelId<TRANSCRIPTION_MODELS>): TranscriptionModelV2;
+  speechModel(modelId: ExtractModelId<SPEECH_MODELS>): SpeechModelV2;
+};
+/**
+ * @deprecated Use `customProvider` instead.
+ */
+declare const experimental_customProvider: typeof customProvider;
+type ExtractModelId<MODELS extends Record<string, unknown>> = Extract<keyof MODELS, string>;
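A sketch of a custom provider that aliases model ids and falls back to the wrapped provider; the underlying provider and model ids are assumptions:

```ts
// Sketch only: the wrapped provider and model ids are assumptions.
import { customProvider } from 'ai';
import { openai } from '@ai-sdk/openai';

export const myProvider = customProvider({
  languageModels: {
    // Friendly aliases that the rest of the app can use.
    'chat': openai('gpt-4o-mini'),
    'chat-large': openai('gpt-4o'),
  },
  // Ids not listed above fall through to the original provider.
  fallbackProvider: openai,
});

const model = myProvider.languageModel('chat');
```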
|
3033
3129
|
|
3034
|
-
|
3130
|
+
declare const symbol: unique symbol;
|
3131
|
+
declare class NoSuchProviderError extends NoSuchModelError {
|
3132
|
+
private readonly [symbol];
|
3133
|
+
readonly providerId: string;
|
3134
|
+
readonly availableProviders: string[];
|
3135
|
+
constructor({ modelId, modelType, providerId, availableProviders, message, }: {
|
3136
|
+
modelId: string;
|
3137
|
+
modelType: 'languageModel' | 'textEmbeddingModel' | 'imageModel' | 'transcriptionModel' | 'speechModel';
|
3138
|
+
providerId: string;
|
3139
|
+
availableProviders: string[];
|
3140
|
+
message?: string;
|
3141
|
+
});
|
3142
|
+
static isInstance(error: unknown): error is NoSuchProviderError;
|
3143
|
+
}
|
3035
3144
|
|
3036
|
-
|
3037
|
-
|
3038
|
-
|
3039
|
-
|
3145
|
+
type ExtractLiteralUnion<T> = T extends string ? string extends T ? never : T : never;
|
3146
|
+
interface ProviderRegistryProvider<PROVIDERS extends Record<string, ProviderV2> = Record<string, ProviderV2>, SEPARATOR extends string = ':'> {
|
3147
|
+
languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['languageModel']>>[0]>}` : never): LanguageModelV2;
|
3148
|
+
languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): LanguageModelV2;
|
3149
|
+
textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['textEmbeddingModel']>>[0]>}` : never): EmbeddingModelV2<string>;
|
3150
|
+
textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): EmbeddingModelV2<string>;
|
3151
|
+
imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['imageModel']>>[0]>}` : never): ImageModelV2;
|
3152
|
+
imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): ImageModelV2;
|
3153
|
+
transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['transcriptionModel']>>[0]>}` : never): TranscriptionModelV2;
|
3154
|
+
transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): TranscriptionModelV2;
|
3155
|
+
speechModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['speechModel']>>[0]>}` : never): SpeechModelV2;
|
3156
|
+
speechModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): SpeechModelV2;
|
3157
|
+
}
|
3158
|
+
/**
|
3159
|
+
* Creates a registry for the given providers.
|
3160
|
+
*/
|
3161
|
+
declare function createProviderRegistry<PROVIDERS extends Record<string, ProviderV2>, SEPARATOR extends string = ':'>(providers: PROVIDERS, { separator, }?: {
|
3162
|
+
separator?: SEPARATOR;
|
3163
|
+
}): ProviderRegistryProvider<PROVIDERS, SEPARATOR>;
|
3164
|
+
/**
|
3165
|
+
* @deprecated Use `createProviderRegistry` instead.
|
3166
|
+
*/
|
3167
|
+
declare const experimental_createProviderRegistry: typeof createProviderRegistry;
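A registry sketch, assuming two installed providers; model ids are addressed as `provider` + separator + `model id`, with `:` as the default separator:

```ts
// Sketch only: both provider packages are assumptions.
import { createProviderRegistry, generateText } from 'ai';
import { openai } from '@ai-sdk/openai';
import { anthropic } from '@ai-sdk/anthropic';

const registry = createProviderRegistry({ openai, anthropic });

const { text } = await generateText({
  // Resolved through the registry by prefixed id.
  model: registry.languageModel('openai:gpt-4o-mini'),
  prompt: 'Hello',
});
```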
|
3040
3168
|
|
3041
|
-
|
3042
|
-
|
3169
|
+
+declare function createTextStreamResponse({ status, statusText, headers, textStream, }: ResponseInit & {
+  textStream: ReadableStream<string>;
+}): Response;
 
+declare function pipeTextStreamToResponse({ response, status, statusText, headers, textStream, }: {
+  response: ServerResponse;
+  textStream: ReadableStream<string>;
+} & ResponseInit): void;
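A sketch of returning a plain text stream from a hypothetical route handler:

```ts
// Sketch only: the route handler shape is an assumption.
import { createTextStreamResponse } from 'ai';

export function GET(): Response {
  const textStream = new ReadableStream<string>({
    start(controller) {
      controller.enqueue('hello ');
      controller.enqueue('world');
      controller.close();
    },
  });

  // Responds with text/plain; charset=utf-8, one chunk per enqueued string.
  return createTextStreamResponse({ textStream });
}
```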
|
3045
3177
|
|
3046
|
-
|
3047
|
-
|
3048
|
-
|
3178
|
+
declare const JSONRPCRequestSchema: z.ZodObject<{
|
3179
|
+
jsonrpc: z.ZodLiteral<"2.0">;
|
3180
|
+
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
3181
|
+
method: z.ZodString;
|
3182
|
+
params: z.ZodOptional<z.ZodObject<{
|
3183
|
+
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3184
|
+
}, z.core.$loose>>;
|
3185
|
+
}, z.core.$strict>;
|
3186
|
+
type JSONRPCRequest = z.infer<typeof JSONRPCRequestSchema>;
|
3187
|
+
declare const JSONRPCResponseSchema: z.ZodObject<{
|
3188
|
+
jsonrpc: z.ZodLiteral<"2.0">;
|
3189
|
+
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
3190
|
+
result: z.ZodObject<{
|
3191
|
+
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3192
|
+
}, z.core.$loose>;
|
3193
|
+
}, z.core.$strict>;
|
3194
|
+
type JSONRPCResponse = z.infer<typeof JSONRPCResponseSchema>;
|
3195
|
+
declare const JSONRPCErrorSchema: z.ZodObject<{
|
3196
|
+
jsonrpc: z.ZodLiteral<"2.0">;
|
3197
|
+
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
3198
|
+
error: z.ZodObject<{
|
3199
|
+
code: z.ZodNumber;
|
3200
|
+
message: z.ZodString;
|
3201
|
+
data: z.ZodOptional<z.ZodUnknown>;
|
3202
|
+
}, z.core.$strip>;
|
3203
|
+
}, z.core.$strict>;
|
3204
|
+
type JSONRPCError = z.infer<typeof JSONRPCErrorSchema>;
|
3205
|
+
declare const JSONRPCNotificationSchema: z.ZodObject<{
|
3206
|
+
jsonrpc: z.ZodLiteral<"2.0">;
|
3207
|
+
method: z.ZodString;
|
3208
|
+
params: z.ZodOptional<z.ZodObject<{
|
3209
|
+
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3210
|
+
}, z.core.$loose>>;
|
3211
|
+
}, z.core.$strict>;
|
3212
|
+
type JSONRPCNotification = z.infer<typeof JSONRPCNotificationSchema>;
|
3213
|
+
declare const JSONRPCMessageSchema: z.ZodUnion<readonly [z.ZodObject<{
|
3214
|
+
jsonrpc: z.ZodLiteral<"2.0">;
|
3215
|
+
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
3216
|
+
method: z.ZodString;
|
3217
|
+
params: z.ZodOptional<z.ZodObject<{
|
3218
|
+
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3219
|
+
}, z.core.$loose>>;
|
3220
|
+
}, z.core.$strict>, z.ZodObject<{
|
3221
|
+
jsonrpc: z.ZodLiteral<"2.0">;
|
3222
|
+
method: z.ZodString;
|
3223
|
+
params: z.ZodOptional<z.ZodObject<{
|
3224
|
+
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3225
|
+
}, z.core.$loose>>;
|
3226
|
+
}, z.core.$strict>, z.ZodObject<{
|
3227
|
+
jsonrpc: z.ZodLiteral<"2.0">;
|
3228
|
+
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
3229
|
+
result: z.ZodObject<{
|
3230
|
+
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3231
|
+
}, z.core.$loose>;
|
3232
|
+
}, z.core.$strict>, z.ZodObject<{
|
3233
|
+
jsonrpc: z.ZodLiteral<"2.0">;
|
3234
|
+
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
3235
|
+
error: z.ZodObject<{
|
3236
|
+
code: z.ZodNumber;
|
3237
|
+
message: z.ZodString;
|
3238
|
+
data: z.ZodOptional<z.ZodUnknown>;
|
3239
|
+
}, z.core.$strip>;
|
3240
|
+
}, z.core.$strict>]>;
|
3241
|
+
type JSONRPCMessage = z.infer<typeof JSONRPCMessageSchema>;
|
3049
3242
|
|
3050
|
-
|
3051
|
-
|
3243
|
+
/**
|
3244
|
+
* Transport interface for MCP (Model Context Protocol) communication.
|
3245
|
+
* Maps to the `Transport` interface in the MCP spec.
|
3052
3246
|
*/
|
3053
|
-
|
3054
|
-
/**
|
3055
|
-
The enum values that the model should use.
|
3056
|
-
*/
|
3057
|
-
enum: Array<RESULT>;
|
3058
|
-
mode?: 'json';
|
3059
|
-
output: 'enum';
|
3060
|
-
} : OUTPUT extends 'no-schema' ? {} : {
|
3061
|
-
/**
|
3062
|
-
The schema of the object that the model should generate.
|
3063
|
-
*/
|
3064
|
-
schema: SCHEMA;
|
3247
|
+
interface MCPTransport {
|
3065
3248
|
/**
|
3066
|
-
|
3067
|
-
|
3068
|
-
|
3069
|
-
*/
|
3070
|
-
schemaName?: string;
|
3249
|
+
* Initialize and start the transport
|
3250
|
+
*/
|
3251
|
+
start(): Promise<void>;
|
3071
3252
|
/**
|
3072
|
-
|
3073
|
-
|
3074
|
-
|
3075
|
-
|
3076
|
-
schemaDescription?: string;
|
3253
|
+
* Send a JSON-RPC message through the transport
|
3254
|
+
* @param message The JSON-RPC message to send
|
3255
|
+
*/
|
3256
|
+
send(message: JSONRPCMessage): Promise<void>;
|
3077
3257
|
/**
|
3078
|
-
|
3079
|
-
|
3080
|
-
|
3081
|
-
|
3082
|
-
- 'auto': The provider will choose the best mode for the model.
|
3083
|
-
- 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
|
3084
|
-
- 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
|
3085
|
-
|
3086
|
-
Please note that most providers do not support all modes.
|
3087
|
-
|
3088
|
-
Default and recommended: 'auto' (best mode for the model).
|
3089
|
-
*/
|
3090
|
-
mode?: 'auto' | 'json' | 'tool';
|
3091
|
-
}) & {
|
3092
|
-
output?: OUTPUT;
|
3258
|
+
* Clean up and close the transport
|
3259
|
+
*/
|
3260
|
+
close(): Promise<void>;
|
3093
3261
|
/**
|
3094
|
-
|
3262
|
+
* Event handler for transport closure
|
3095
3263
|
*/
|
3096
|
-
|
3264
|
+
onclose?: () => void;
|
3097
3265
|
/**
|
3098
|
-
|
3099
|
-
to enable JSON parsing.
|
3266
|
+
* Event handler for transport errors
|
3100
3267
|
*/
|
3101
|
-
|
3268
|
+
onerror?: (error: Error) => void;
|
3102
3269
|
/**
|
3103
|
-
|
3104
|
-
|
3105
|
-
|
3270
|
+
* Event handler for received messages
|
3271
|
+
*/
|
3272
|
+
onmessage?: (message: JSONRPCMessage) => void;
|
3273
|
+
}
|
3274
|
+
type MCPTransportConfig = {
|
3275
|
+
type: 'sse';
|
3106
3276
|
/**
|
3107
|
-
|
3108
|
-
|
3109
|
-
|
3110
|
-
*/
|
3111
|
-
providerOptions?: ProviderOptions;
|
3277
|
+
* The URL of the MCP server.
|
3278
|
+
*/
|
3279
|
+
url: string;
|
3112
3280
|
/**
|
3113
|
-
*
|
3281
|
+
* Additional HTTP headers to be sent with requests.
|
3114
3282
|
*/
|
3115
|
-
|
3116
|
-
|
3117
|
-
currentDate?: () => Date;
|
3118
|
-
};
|
3119
|
-
}): Promise<GenerateObjectResult<RESULT>>;
|
3283
|
+
headers?: Record<string, string>;
|
3284
|
+
};
|
3120
3285
|
|
3121
|
-
|
3122
|
-
|
3286
|
+
type ToolSchemas = Record<string, {
|
3287
|
+
inputSchema: FlexibleSchema<JSONObject | unknown>;
|
3288
|
+
}> | 'automatic' | undefined;
|
3289
|
+
type MappedTool<T extends Tool | JSONObject, OUTPUT extends any> = T extends Tool<infer INPUT> ? Tool<INPUT, OUTPUT> : T extends JSONObject ? Tool<T, OUTPUT> : never;
|
3290
|
+
type McpToolSet<TOOL_SCHEMAS extends ToolSchemas = 'automatic'> = TOOL_SCHEMAS extends Record<string, {
|
3291
|
+
inputSchema: FlexibleSchema<unknown>;
|
3292
|
+
}> ? {
|
3293
|
+
[K in keyof TOOL_SCHEMAS]: MappedTool<TOOL_SCHEMAS[K], CallToolResult> & Required<Pick<MappedTool<TOOL_SCHEMAS[K], CallToolResult>, 'execute'>>;
|
3294
|
+
} : McpToolSet<Record<string, {
|
3295
|
+
inputSchema: FlexibleSchema<unknown>;
|
3296
|
+
}>>;
|
3297
|
+
declare const CallToolResultSchema: z.ZodUnion<[z.ZodObject<{
|
3298
|
+
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3299
|
+
content: z.ZodArray<z.ZodUnion<readonly [z.ZodObject<{
|
3300
|
+
type: z.ZodLiteral<"text">;
|
3301
|
+
text: z.ZodString;
|
3302
|
+
}, z.core.$loose>, z.ZodObject<{
|
3303
|
+
type: z.ZodLiteral<"image">;
|
3304
|
+
data: z.ZodString;
|
3305
|
+
mimeType: z.ZodString;
|
3306
|
+
}, z.core.$loose>, z.ZodObject<{
|
3307
|
+
type: z.ZodLiteral<"resource">;
|
3308
|
+
resource: z.ZodUnion<readonly [z.ZodObject<{
|
3309
|
+
uri: z.ZodString;
|
3310
|
+
mimeType: z.ZodOptional<z.ZodString>;
|
3311
|
+
text: z.ZodString;
|
3312
|
+
}, z.core.$loose>, z.ZodObject<{
|
3313
|
+
uri: z.ZodString;
|
3314
|
+
mimeType: z.ZodOptional<z.ZodString>;
|
3315
|
+
blob: z.ZodString;
|
3316
|
+
}, z.core.$loose>]>;
|
3317
|
+
}, z.core.$loose>]>>;
|
3318
|
+
isError: z.ZodOptional<z.ZodDefault<z.ZodBoolean>>;
|
3319
|
+
}, z.core.$loose>, z.ZodObject<{
|
3320
|
+
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3321
|
+
toolResult: z.ZodUnknown;
|
3322
|
+
}, z.core.$loose>]>;
|
3323
|
+
type CallToolResult = z.infer<typeof CallToolResultSchema>;
|
3324
|
+
|
3325
|
+
+interface MCPClientConfig {
+  /** Transport configuration for connecting to the MCP server */
+  transport: MCPTransportConfig | MCPTransport;
+  /** Optional callback for uncaught errors */
+  onUncaughtError?: (error: unknown) => void;
+  /** Optional client name, defaults to 'ai-sdk-mcp-client' */
+  name?: string;
+}
+declare function createMCPClient(config: MCPClientConfig): Promise<MCPClient>;
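A client sketch, assuming an SSE server URL and the `experimental_createMCPClient` export alias used by recent releases of this package (both assumptions):

```ts
// Sketch only: server URL, provider, and export alias are assumptions.
import { experimental_createMCPClient as createMCPClient, generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const mcpClient = await createMCPClient({
  transport: { type: 'sse', url: 'https://example.com/mcp/sse' },
});

try {
  // With the default 'automatic' schemas, tool input schemas are inferred
  // from the server's JSON schema.
  const tools = await mcpClient.tools();

  await generateText({
    model: openai('gpt-4o-mini'),
    tools,
    prompt: 'Use the available tools to answer.',
  });
} finally {
  await mcpClient.close();
}
```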
|
3334
|
+
/**
|
3335
|
+
* A lightweight MCP Client implementation
|
3336
|
+
*
|
3337
|
+
* The primary purpose of this client is tool conversion between MCP<>AI SDK
|
3338
|
+
* but can later be extended to support other MCP features
|
3339
|
+
*
|
3340
|
+
* Tool parameters are automatically inferred from the server's JSON schema
|
3341
|
+
* if not explicitly provided in the tools configuration
|
3342
|
+
*
|
3343
|
+
* This client is meant to be used to communicate with a single server. To communicate and fetch tools across multiple servers, it's recommended to create a new client instance per server.
|
3344
|
+
*
|
3345
|
+
* Not supported:
|
3346
|
+
* - Client options (e.g. sampling, roots) as they are not needed for tool conversion
|
3347
|
+
* - Accepting notifications
|
3348
|
+
* - Session management (when passing a sessionId to an instance of the Streamable HTTP transport)
|
3349
|
+
* - Resumable SSE streams
|
3123
3350
|
*/
|
3124
|
-
|
3125
|
-
|
3126
|
-
|
3127
|
-
|
3128
|
-
|
3129
|
-
|
3130
|
-
|
3131
|
-
|
3132
|
-
|
3351
|
+
declare class MCPClient {
|
3352
|
+
private transport;
|
3353
|
+
private onUncaughtError?;
|
3354
|
+
private clientInfo;
|
3355
|
+
private requestMessageId;
|
3356
|
+
private responseHandlers;
|
3357
|
+
private serverCapabilities;
|
3358
|
+
private isClosed;
|
3359
|
+
constructor({ transport: transportConfig, name, onUncaughtError, }: MCPClientConfig);
|
3360
|
+
init(): Promise<this>;
|
3361
|
+
close(): Promise<void>;
|
3362
|
+
private assertCapability;
|
3363
|
+
private request;
|
3364
|
+
private listTools;
|
3365
|
+
private callTool;
|
3366
|
+
private notification;
|
3133
3367
|
/**
|
3134
|
-
|
3135
|
-
|
3136
|
-
results that can be fully encapsulated in the provider.
|
3368
|
+
* Returns a set of AI SDK tools from the MCP server
|
3369
|
+
* @returns A record of tool names to their implementations
|
3137
3370
|
*/
|
3138
|
-
|
3139
|
-
|
3140
|
-
|
3141
|
-
|
3142
|
-
|
3143
|
-
|
3144
|
-
|
3145
|
-
|
3146
|
-
|
3371
|
+
tools<TOOL_SCHEMAS extends ToolSchemas = 'automatic'>({ schemas, }?: {
|
3372
|
+
schemas?: TOOL_SCHEMAS;
|
3373
|
+
}): Promise<McpToolSet<TOOL_SCHEMAS>>;
|
3374
|
+
private onClose;
|
3375
|
+
private onError;
|
3376
|
+
private onResponse;
|
3377
|
+
}
|
3378
|
+
|
3379
|
+
/**
|
3380
|
+
The result of a `transcribe` call.
|
3381
|
+
It contains the transcript and additional information.
|
3382
|
+
*/
|
3383
|
+
interface TranscriptionResult {
|
3147
3384
|
/**
|
3148
|
-
|
3149
|
-
|
3150
|
-
readonly
|
3385
|
+
* The complete transcribed text from the audio.
|
3386
|
+
*/
|
3387
|
+
readonly text: string;
|
3151
3388
|
/**
|
3152
|
-
|
3153
|
-
|
3154
|
-
|
3155
|
-
|
3156
|
-
|
3157
|
-
|
3389
|
+
* Array of transcript segments with timing information.
|
3390
|
+
* Each segment represents a portion of the transcribed text with start and end times.
|
3391
|
+
*/
|
3392
|
+
readonly segments: Array<{
|
3393
|
+
/**
|
3394
|
+
* The text content of this segment.
|
3395
|
+
*/
|
3396
|
+
readonly text: string;
|
3397
|
+
/**
|
3398
|
+
* The start time of this segment in seconds.
|
3399
|
+
*/
|
3400
|
+
readonly startSecond: number;
|
3401
|
+
/**
|
3402
|
+
* The end time of this segment in seconds.
|
3403
|
+
*/
|
3404
|
+
readonly endSecond: number;
|
3405
|
+
}>;
|
3158
3406
|
/**
|
3159
|
-
*
|
3407
|
+
* The detected language of the audio content, as an ISO-639-1 code (e.g., 'en' for English).
|
3408
|
+
* May be undefined if the language couldn't be detected.
|
3160
3409
|
*/
|
3161
|
-
readonly
|
3410
|
+
readonly language: string | undefined;
|
3162
3411
|
/**
|
3163
|
-
|
3164
|
-
|
3165
|
-
|
3166
|
-
readonly
|
3412
|
+
* The total duration of the audio file in seconds.
|
3413
|
+
* May be undefined if the duration couldn't be determined.
|
3414
|
+
*/
|
3415
|
+
readonly durationInSeconds: number | undefined;
|
3167
3416
|
/**
|
3168
|
-
|
3169
|
-
Only errors that stop the stream, such as network errors, are thrown.
|
3417
|
+
Warnings for the call, e.g. unsupported settings.
|
3170
3418
|
*/
|
3171
|
-
readonly
|
3419
|
+
readonly warnings: Array<TranscriptionWarning>;
|
3172
3420
|
/**
|
3173
|
-
|
3174
|
-
|
3175
|
-
|
3176
|
-
|
3177
|
-
@param response A Node.js response-like object (ServerResponse).
|
3178
|
-
@param init Optional headers, status code, and status text.
|
3179
|
-
*/
|
3180
|
-
pipeTextStreamToResponse(response: ServerResponse$1, init?: ResponseInit): void;
|
3421
|
+
Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
|
3422
|
+
*/
|
3423
|
+
readonly responses: Array<TranscriptionModelResponseMetadata>;
|
3181
3424
|
/**
|
3182
|
-
|
3183
|
-
|
3184
|
-
|
3185
|
-
Non-text-delta events are ignored.
|
3186
|
-
|
3187
|
-
@param init Optional headers, status code, and status text.
|
3188
|
-
*/
|
3189
|
-
toTextStreamResponse(init?: ResponseInit): Response;
|
3425
|
+
Provider metadata from the provider.
|
3426
|
+
*/
|
3427
|
+
readonly providerMetadata: Record<string, Record<string, JSONValue$1>>;
|
3190
3428
|
}
|
3191
|
-
type ObjectStreamPart<PARTIAL> = {
|
3192
|
-
type: 'object';
|
3193
|
-
object: PARTIAL;
|
3194
|
-
} | {
|
3195
|
-
type: 'text-delta';
|
3196
|
-
textDelta: string;
|
3197
|
-
} | {
|
3198
|
-
type: 'error';
|
3199
|
-
error: unknown;
|
3200
|
-
} | {
|
3201
|
-
type: 'finish';
|
3202
|
-
finishReason: FinishReason;
|
3203
|
-
usage: LanguageModelUsage;
|
3204
|
-
response: LanguageModelResponseMetadata;
|
3205
|
-
providerMetadata?: ProviderMetadata;
|
3206
|
-
};
|
3207
3429
|
|
3208
3430
|
/**
|
3209
|
-
|
3431
|
+
Generates transcripts using a transcription model.
|
3210
3432
|
|
3211
|
-
@param
|
3212
|
-
|
3213
|
-
|
3214
|
-
|
3215
|
-
|
3216
|
-
|
3217
|
-
|
3433
|
+
@param model - The transcription model to use.
|
3434
|
+
@param audio - The audio data to transcribe as DataContent (string | Uint8Array | ArrayBuffer | Buffer) or a URL.
|
3435
|
+
@param providerOptions - Additional provider-specific options that are passed through to the provider
|
3436
|
+
as body parameters.
|
3437
|
+
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
|
3438
|
+
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
3439
|
+
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
3218
3440
|
|
3219
|
-
@
|
3441
|
+
@returns A result object that contains the generated transcript.
|
3220
3442
|
*/
|
3221
|
-
|
3222
|
-
/**
|
3223
|
-
The token usage of the generated response.
|
3224
|
-
*/
|
3225
|
-
usage: LanguageModelUsage;
|
3443
|
+
declare function transcribe({ model, audio, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
|
3226
3444
|
/**
|
3227
|
-
The
|
3228
|
-
|
3229
|
-
|
3445
|
+
The transcription model to use.
|
3446
|
+
*/
|
3447
|
+
model: TranscriptionModelV2;
|
3230
3448
|
/**
|
3231
|
-
|
3232
|
-
|
3233
|
-
|
3449
|
+
The audio data to transcribe.
|
3450
|
+
*/
|
3451
|
+
audio: DataContent | URL;
|
3234
3452
|
/**
|
3235
|
-
|
3236
|
-
|
3237
|
-
|
3453
|
+
Additional provider-specific options that are passed through to the provider
|
3454
|
+
as body parameters.
|
3455
|
+
|
3456
|
+
The outer record is keyed by the provider name, and the inner
|
3457
|
+
record is keyed by the provider-specific metadata key.
|
3458
|
+
```ts
|
3459
|
+
{
|
3460
|
+
"openai": {
|
3461
|
+
"temperature": 0
|
3462
|
+
}
|
3463
|
+
}
|
3464
|
+
```
|
3465
|
+
*/
|
3466
|
+
providerOptions?: ProviderOptions;
|
3238
3467
|
/**
|
-
-
-
-
-
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerMetadata: ProviderMetadata | undefined;
- }) => Promise<void> | void;
- /**
- Generate a structured, typed object for a given prompt and schema using a language model.
-
- This function streams the output. If you do not want to stream the output, use `generateObject` instead.
-
- @param model - The language model to use.
- @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
-
- @param system - A system message that will be part of the prompt.
- @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
- @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
-
- @param maxOutputTokens - Maximum number of tokens to generate.
- @param temperature - Temperature setting.
- The value is passed through to the provider. The range depends on the provider and model.
- It is recommended to set either `temperature` or `topP`, but not both.
- @param topP - Nucleus sampling.
- The value is passed through to the provider. The range depends on the provider and model.
- It is recommended to set either `temperature` or `topP`, but not both.
- @param topK - Only sample from the top K options for each subsequent token.
- Used to remove "long tail" low probability responses.
- Recommended for advanced use cases only. You usually only need to use temperature.
- @param presencePenalty - Presence penalty setting.
- It affects the likelihood of the model to repeat information that is already in the prompt.
- The value is passed through to the provider. The range depends on the provider and model.
- @param frequencyPenalty - Frequency penalty setting.
- It affects the likelihood of the model to repeatedly use the same words or phrases.
- The value is passed through to the provider. The range depends on the provider and model.
- @param stopSequences - Stop sequences.
- If set, the model will stop generating text when one of the stop sequences is generated.
- @param seed - The seed (integer) to use for random sampling.
- If set and supported by the model, calls will generate deterministic results.
-
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
- @param abortSignal - An optional abort signal that can be used to cancel the call.
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
-
- @param schema - The schema of the object that the model should generate.
- @param schemaName - Optional name of the output that should be generated.
- Used by some providers for additional LLM guidance, e.g.
- via tool or schema name.
- @param schemaDescription - Optional description of the output that should be generated.
- Used by some providers for additional LLM guidance, e.g.
- via tool or schema description.
-
- @param output - The type of the output.
-
- - 'object': The output is an object.
- - 'array': The output is an array.
- - 'enum': The output is an enum.
- - 'no-schema': The output is not a schema.
-
- @param experimental_telemetry - Optional telemetry configuration (experimental).
-
- @param providerOptions - Additional provider-specific options. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
-
- @returns
- A result object for accessing the partial object stream and additional information.
- */
- declare function streamObject<SCHEMA extends z3.Schema | z4$1.ZodType | Schema = z4$1.ZodType<JSONValue$1>, OUTPUT extends 'object' | 'array' | 'enum' | 'no-schema' = InferSchema<SCHEMA> extends string ? 'enum' : 'object', RESULT = OUTPUT extends 'array' ? Array<InferSchema<SCHEMA>> : InferSchema<SCHEMA>>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (OUTPUT extends 'enum' ? {
- /**
- The enum values that the model should use.
- */
- enum: Array<RESULT>;
- mode?: 'json';
- output: 'enum';
- } : OUTPUT extends 'no-schema' ? {} : {
- /**
- The schema of the object that the model should generate.
- */
- schema: SCHEMA;
- /**
- Optional name of the output that should be generated.
- Used by some providers for additional LLM guidance, e.g.
- via tool or schema name.
- */
- schemaName?: string;
+ Maximum number of retries per transcript model call. Set to 0 to disable retries.
+
+ @default 2
+ */
+ maxRetries?: number;
  /**
-
-
-
- */
- schemaDescription?: string;
+ Abort signal.
+ */
+ abortSignal?: AbortSignal;
  /**
-
-
-
-
-
- - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
- - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
+ Additional headers to include in the request.
+ Only applicable for HTTP-based providers.
+ */
+ headers?: Record<string, string>;
+ }): Promise<TranscriptionResult>;
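The added `maxRetries`, `abortSignal`, and `headers` fields round out the per-call options of `experimental_transcribe`. A minimal sketch of using them, assuming an OpenAI transcription model and an illustrative local file (the model id, file name, and header are not part of this diff):

```ts
import { experimental_transcribe as transcribe } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumption: any provider with a transcription model works
import { readFile } from 'node:fs/promises';

const result = await transcribe({
  model: openai.transcription('whisper-1'), // illustrative model id
  audio: await readFile('./meeting.mp3'),   // illustrative file
  maxRetries: 0,                            // new: disable retries for this call
  abortSignal: AbortSignal.timeout(30_000), // new: cancel the call after 30 seconds
  headers: { 'x-example-header': 'docs' },  // new: only applies to HTTP-based providers
});

console.log(result.text);
```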
 
-
+ declare const getOriginalFetch: () => typeof fetch;
+ declare function callCompletionApi({ api, prompt, credentials, headers, body, streamProtocol, setCompletion, setLoading, setError, setAbortController, onFinish, onError, fetch, }: {
+ api: string;
+ prompt: string;
+ credentials: RequestCredentials | undefined;
+ headers: HeadersInit | undefined;
+ body: Record<string, any>;
+ streamProtocol: 'data' | 'text' | undefined;
+ setCompletion: (completion: string) => void;
+ setLoading: (loading: boolean) => void;
+ setError: (error: Error | undefined) => void;
+ setAbortController: (abortController: AbortController | null) => void;
+ onFinish: ((prompt: string, completion: string) => void) | undefined;
+ onError: ((error: Error) => void) | undefined;
+ fetch: ReturnType<typeof getOriginalFetch> | undefined;
+ }): Promise<string | null | undefined>;
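`callCompletionApi` is the framework-agnostic helper that the completion hooks build on; per the declared shape above, every field must be passed explicitly (as `undefined` where unused). A minimal sketch with an illustrative endpoint and stand-in state setters:

```ts
import { callCompletionApi } from 'ai';

let completion = '';

const finalCompletion = await callCompletionApi({
  api: '/api/completion', // illustrative endpoint
  prompt: 'Write a haiku about release notes.',
  credentials: undefined,
  headers: undefined,
  body: {},
  streamProtocol: 'data',
  setCompletion: text => { completion = text; },            // called with the accumulated text
  setLoading: isLoading => console.log({ isLoading }),
  setError: error => { if (error) console.error(error); },
  setAbortController: () => {},                             // store it if you need to cancel
  onFinish: (prompt, text) => console.log('finished', { prompt, text }),
  onError: error => console.error(error),
  fetch: undefined,                                         // falls back to the global fetch
});
```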
 
-
- */
- mode?: 'auto' | 'json' | 'tool';
- }) & {
- output?: OUTPUT;
- /**
- The language model to use.
- */
- model: LanguageModel;
+ interface UIMessageStreamWriter<UI_MESSAGE extends UIMessage = UIMessage> {
  /**
-
+ * Appends a data stream part to the stream.
  */
-
- /**
- Additional provider-specific options. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
- /**
- Callback that is invoked when an error occurs during streaming.
- You can use it to log errors.
- The stream processing will pause until the callback promise is resolved.
- */
- onError?: StreamObjectOnErrorCallback;
- /**
- Callback that is called when the LLM response and the final object validation are finished.
- */
- onFinish?: StreamObjectOnFinishCallback<RESULT>;
+ write(part: InferUIMessageChunk<UI_MESSAGE>): void;
  /**
- *
+ * Merges the contents of another stream to this stream.
  */
-
- generateId?: () => string;
- currentDate?: () => Date;
- now?: () => number;
- };
- }): StreamObjectResult<OUTPUT extends 'enum' ? string : OUTPUT extends 'array' ? RESULT : DeepPartial<RESULT>, OUTPUT extends 'array' ? RESULT : RESULT, OUTPUT extends 'array' ? RESULT extends Array<infer U> ? AsyncIterableStream<U> : never : never>;
-
- /**
- * A generated audio file.
- */
- interface GeneratedAudioFile extends GeneratedFile {
+ merge(stream: ReadableStream<InferUIMessageChunk<UI_MESSAGE>>): void;
  /**
- *
+ * Error handler that is used by the data stream writer.
+ * This is intended for forwarding when merging streams
+ * to prevent duplicated error masking.
  */
-
+ onError: ErrorHandler | undefined;
  }
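A sketch of how the new writer interface is typically used inside `createUIMessageStream` (declared further down in this diff): `write` emits individual chunks, `merge` forwards another UI message stream. The data part name, model id, and `toUIMessageStream()` call are assumptions for illustration, not part of this diff:

```ts
import { createUIMessageStream, streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumption: any LanguageModel works

const stream = createUIMessageStream({
  execute: async ({ writer }) => {
    // write a custom data part (the `data-status` name is illustrative)
    writer.write({ type: 'data-status', data: { state: 'started' } });

    // merge the UI message stream produced by a text generation call
    const result = streamText({
      model: openai('gpt-4o'), // illustrative model id
      prompt: 'Summarize the changelog.',
    });
    writer.merge(result.toUIMessageStream());
  },
  // forwarded to the writer so merged streams do not double-mask errors
  onError: error => `Stream error: ${String(error)}`,
});
```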
 
-
-
-
-
-
- /**
- * The audio data as a base64 encoded string or binary data.
- */
- readonly audio: GeneratedAudioFile;
- /**
- Warnings for the call, e.g. unsupported settings.
- */
- readonly warnings: Array<SpeechWarning>;
- /**
- Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
- */
- readonly responses: Array<SpeechModelResponseMetadata>;
+ declare function createUIMessageStream<UI_MESSAGE extends UIMessage>({ execute, onError, originalMessages, onFinish, generateId, }: {
+ execute: (options: {
+ writer: UIMessageStreamWriter<UI_MESSAGE>;
+ }) => Promise<void> | void;
+ onError?: (error: unknown) => string;
  /**
-
+ * The original messages. If they are provided, persistence mode is assumed,
+ * and a message ID is provided for the response message.
  */
-
+ originalMessages?: UI_MESSAGE[];
+ onFinish?: (options: {
+ /**
+ * The updates list of UI messages.
+ */
+ messages: UI_MESSAGE[];
+ /**
+ * Indicates whether the response message is a continuation of the last original message,
+ * or if a new message was created.
+ */
+ isContinuation: boolean;
+ /**
+ * The message that was sent to the client as a response
+ * (including the original message if it was extended).
+ */
+ responseMessage: UI_MESSAGE;
+ }) => void;
+ generateId?: IdGenerator;
+ }): ReadableStream<InferUIMessageChunk<UI_MESSAGE>>;
+
+ declare function createUIMessageStreamResponse({ status, statusText, headers, stream, consumeSseStream, }: UIMessageStreamResponseInit & {
+ stream: ReadableStream<UIMessageChunk>;
+ }): Response;
+
+ declare class JsonToSseTransformStream extends TransformStream<unknown, string> {
+ constructor();
  }
 
+ declare function pipeUIMessageStreamToResponse({ response, status, statusText, headers, stream, consumeSseStream, }: {
+ response: ServerResponse;
+ stream: ReadableStream<UIMessageChunk>;
+ } & UIMessageStreamResponseInit): void;
+
  /**
-
+ * Transforms a stream of `UIMessageChunk`s into an `AsyncIterableStream` of `UIMessage`s.
+ *
+ * @param options.message - The last assistant message to use as a starting point when the conversation is resumed. Otherwise undefined.
+ * @param options.stream - The stream of `UIMessageChunk`s to read.
+ *
+ * @returns An `AsyncIterableStream` of `UIMessage`s. Each stream part is a different state of the same message
+ * as it is being completed.
+ */
+ declare function readUIMessageStream<UI_MESSAGE extends UIMessage>({ message, stream, }: {
+ message?: UI_MESSAGE;
+ stream: ReadableStream<UIMessageChunk>;
+ }): AsyncIterableStream<UI_MESSAGE>;
 
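The response helpers wrap such a stream for HTTP delivery, and `readUIMessageStream` folds a chunk stream back into successive `UIMessage` snapshots. A hedged sketch (the chunk shapes written here are illustrative, not taken from this diff):

```ts
import {
  createUIMessageStream,
  createUIMessageStreamResponse,
  readUIMessageStream,
} from 'ai';

const stream = createUIMessageStream({
  execute: async ({ writer }) => {
    writer.write({ type: 'text-start', id: 'text-1' });
    writer.write({ type: 'text-delta', id: 'text-1', delta: 'Hello from beta.13' });
    writer.write({ type: 'text-end', id: 'text-1' });
  },
});

// In a route handler you would return the stream over HTTP instead:
// return createUIMessageStreamResponse({ stream });

// Consuming it directly (e.g. in a test): each iteration is a fuller
// state of the same assistant message as chunks arrive.
for await (const message of readUIMessageStream({ stream })) {
  console.log(message.parts);
}
```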
3420
|
-
|
3421
|
-
|
3422
|
-
|
3423
|
-
|
3424
|
-
|
3425
|
-
|
3426
|
-
@param providerOptions - Additional provider-specific options that are passed through to the provider
|
3427
|
-
as body parameters.
|
3428
|
-
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
|
3429
|
-
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
3430
|
-
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
3431
|
-
|
3432
|
-
@returns A result object that contains the generated audio data.
|
3433
|
-
*/
|
3434
|
-
declare function generateSpeech({ model, text, voice, outputFormat, instructions, speed, language, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
|
3435
|
-
/**
|
3436
|
-
The speech model to use.
|
3437
|
-
*/
|
3438
|
-
model: SpeechModelV2;
|
3439
|
-
/**
|
3440
|
-
The text to convert to speech.
|
3441
|
-
*/
|
3442
|
-
text: string;
|
3443
|
-
/**
|
3444
|
-
The voice to use for speech generation.
|
3445
|
-
*/
|
3446
|
-
voice?: string;
|
3447
|
-
/**
|
3448
|
-
* The desired output format for the audio e.g. "mp3", "wav", etc.
|
3449
|
-
*/
|
3450
|
-
outputFormat?: 'mp3' | 'wav' | (string & {});
|
3451
|
-
/**
|
3452
|
-
Instructions for the speech generation e.g. "Speak in a slow and steady tone".
|
3453
|
-
*/
|
3454
|
-
instructions?: string;
|
3455
|
-
/**
|
3456
|
-
The speed of the speech generation.
|
3457
|
-
*/
|
3458
|
-
speed?: number;
|
3459
|
-
/**
|
3460
|
-
The language for speech generation. This should be an ISO 639-1 language code (e.g. "en", "es", "fr")
|
3461
|
-
or "auto" for automatic language detection. Provider support varies.
|
3462
|
-
*/
|
3463
|
-
language?: string;
|
3464
|
-
/**
|
3465
|
-
Additional provider-specific options that are passed through to the provider
|
3466
|
-
as body parameters.
|
3467
|
-
|
3468
|
-
The outer record is keyed by the provider name, and the inner
|
3469
|
-
record is keyed by the provider-specific metadata key.
|
3470
|
-
```ts
|
3471
|
-
{
|
3472
|
-
"openai": {}
|
3473
|
-
}
|
3474
|
-
```
|
3475
|
-
*/
|
3476
|
-
providerOptions?: ProviderOptions;
|
3477
|
-
/**
|
3478
|
-
Maximum number of retries per speech model call. Set to 0 to disable retries.
|
3479
|
-
|
3480
|
-
@default 2
|
3481
|
-
*/
|
3482
|
-
maxRetries?: number;
|
3483
|
-
/**
|
3484
|
-
Abort signal.
|
3485
|
-
*/
|
3486
|
-
abortSignal?: AbortSignal;
|
3487
|
-
/**
|
3488
|
-
Additional headers to include in the request.
|
3489
|
-
Only applicable for HTTP-based providers.
|
3490
|
-
*/
|
3491
|
-
headers?: Record<string, string>;
|
3492
|
-
}): Promise<SpeechResult>;
|
3493
|
-
|
3494
|
-
/**
|
3495
|
-
* Applies default settings for a language model.
|
3496
|
-
*/
|
3497
|
-
declare function defaultSettingsMiddleware({ settings, }: {
|
3498
|
-
settings: Partial<{
|
3499
|
-
maxOutputTokens?: LanguageModelV2CallOptions['maxOutputTokens'];
|
3500
|
-
temperature?: LanguageModelV2CallOptions['temperature'];
|
3501
|
-
stopSequences?: LanguageModelV2CallOptions['stopSequences'];
|
3502
|
-
topP?: LanguageModelV2CallOptions['topP'];
|
3503
|
-
topK?: LanguageModelV2CallOptions['topK'];
|
3504
|
-
presencePenalty?: LanguageModelV2CallOptions['presencePenalty'];
|
3505
|
-
frequencyPenalty?: LanguageModelV2CallOptions['frequencyPenalty'];
|
3506
|
-
responseFormat?: LanguageModelV2CallOptions['responseFormat'];
|
3507
|
-
seed?: LanguageModelV2CallOptions['seed'];
|
3508
|
-
tools?: LanguageModelV2CallOptions['tools'];
|
3509
|
-
toolChoice?: LanguageModelV2CallOptions['toolChoice'];
|
3510
|
-
headers?: LanguageModelV2CallOptions['headers'];
|
3511
|
-
providerOptions?: LanguageModelV2CallOptions['providerOptions'];
|
3512
|
-
}>;
|
3513
|
-
}): LanguageModelV2Middleware;
|
3514
|
-
|
3515
|
-
/**
|
3516
|
-
* Extract an XML-tagged reasoning section from the generated text and exposes it
|
3517
|
-
* as a `reasoning` property on the result.
|
3518
|
-
*
|
3519
|
-
* @param tagName - The name of the XML tag to extract reasoning from.
|
3520
|
-
* @param separator - The separator to use between reasoning and text sections.
|
3521
|
-
* @param startWithReasoning - Whether to start with reasoning tokens.
|
3522
|
-
*/
|
3523
|
-
declare function extractReasoningMiddleware({ tagName, separator, startWithReasoning, }: {
|
3524
|
-
tagName: string;
|
3525
|
-
separator?: string;
|
3526
|
-
startWithReasoning?: boolean;
|
3527
|
-
}): LanguageModelV2Middleware;
|
3528
|
-
|
3529
|
-
/**
|
3530
|
-
* Simulates streaming chunks with the response from a generate call.
|
3531
|
-
*/
|
3532
|
-
declare function simulateStreamingMiddleware(): LanguageModelV2Middleware;
|
3533
|
-
|
3534
|
-
/**
|
3535
|
-
* Wraps a LanguageModelV2 instance with middleware functionality.
|
3536
|
-
* This function allows you to apply middleware to transform parameters,
|
3537
|
-
* wrap generate operations, and wrap stream operations of a language model.
|
3538
|
-
*
|
3539
|
-
* @param options - Configuration options for wrapping the language model.
|
3540
|
-
* @param options.model - The original LanguageModelV2 instance to be wrapped.
|
3541
|
-
* @param options.middleware - The middleware to be applied to the language model. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.
|
3542
|
-
* @param options.modelId - Optional custom model ID to override the original model's ID.
|
3543
|
-
* @param options.providerId - Optional custom provider ID to override the original model's provider.
|
3544
|
-
* @returns A new LanguageModelV2 instance with middleware applied.
|
3545
|
-
*/
|
3546
|
-
declare const wrapLanguageModel: ({ model, middleware: middlewareArg, modelId, providerId, }: {
|
3547
|
-
model: LanguageModelV2;
|
3548
|
-
middleware: LanguageModelV2Middleware | LanguageModelV2Middleware[];
|
3549
|
-
modelId?: string;
|
3550
|
-
providerId?: string;
|
3551
|
-
}) => LanguageModelV2;
|
3552
|
-
|
3553
|
-
/**
|
3554
|
-
* Creates a custom provider with specified language models, text embedding models, image models, transcription models, speech models, and an optional fallback provider.
|
3555
|
-
*
|
3556
|
-
* @param {Object} options - The options for creating the custom provider.
|
3557
|
-
* @param {Record<string, LanguageModel>} [options.languageModels] - A record of language models, where keys are model IDs and values are LanguageModel instances.
|
3558
|
-
* @param {Record<string, EmbeddingModel<string>>} [options.textEmbeddingModels] - A record of text embedding models, where keys are model IDs and values are EmbeddingModel<string> instances.
|
3559
|
-
* @param {Record<string, ImageModel>} [options.imageModels] - A record of image models, where keys are model IDs and values are ImageModel instances.
|
3560
|
-
* @param {Record<string, TranscriptionModel>} [options.transcriptionModels] - A record of transcription models, where keys are model IDs and values are TranscriptionModel instances.
|
3561
|
-
* @param {Record<string, SpeechModel>} [options.speechModels] - A record of speech models, where keys are model IDs and values are SpeechModel instances.
|
3562
|
-
* @param {Provider} [options.fallbackProvider] - An optional fallback provider to use when a requested model is not found in the custom provider.
|
3563
|
-
* @returns {Provider} A Provider object with languageModel, textEmbeddingModel, imageModel, transcriptionModel, and speechModel methods.
|
3564
|
-
*
|
3565
|
-
* @throws {NoSuchModelError} Throws when a requested model is not found and no fallback provider is available.
|
3566
|
-
*/
|
3567
|
-
declare function customProvider<LANGUAGE_MODELS extends Record<string, LanguageModelV2>, EMBEDDING_MODELS extends Record<string, EmbeddingModelV2<string>>, IMAGE_MODELS extends Record<string, ImageModelV2>, TRANSCRIPTION_MODELS extends Record<string, TranscriptionModelV2>, SPEECH_MODELS extends Record<string, SpeechModelV2>>({ languageModels, textEmbeddingModels, imageModels, transcriptionModels, speechModels, fallbackProvider, }: {
|
3568
|
-
languageModels?: LANGUAGE_MODELS;
|
3569
|
-
textEmbeddingModels?: EMBEDDING_MODELS;
|
3570
|
-
imageModels?: IMAGE_MODELS;
|
3571
|
-
transcriptionModels?: TRANSCRIPTION_MODELS;
|
3572
|
-
speechModels?: SPEECH_MODELS;
|
3573
|
-
fallbackProvider?: ProviderV2;
|
3574
|
-
}): ProviderV2 & {
|
3575
|
-
languageModel(modelId: ExtractModelId<LANGUAGE_MODELS>): LanguageModelV2;
|
3576
|
-
textEmbeddingModel(modelId: ExtractModelId<EMBEDDING_MODELS>): EmbeddingModelV2<string>;
|
3577
|
-
imageModel(modelId: ExtractModelId<IMAGE_MODELS>): ImageModelV2;
|
3578
|
-
transcriptionModel(modelId: ExtractModelId<TRANSCRIPTION_MODELS>): TranscriptionModelV2;
|
3579
|
-
speechModel(modelId: ExtractModelId<SPEECH_MODELS>): SpeechModelV2;
|
+ declare const UI_MESSAGE_STREAM_HEADERS: {
+ 'content-type': string;
+ 'cache-control': string;
+ connection: string;
+ 'x-vercel-ai-ui-message-stream': string;
+ 'x-accel-buffering': string;
  };
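The exported header set can be reused when constructing a UI message stream response by hand instead of copying the SSE headers manually; a small sketch:

```ts
import { UI_MESSAGE_STREAM_HEADERS } from 'ai';

// Reuse the canonical headers ('content-type', 'cache-control',
// 'x-vercel-ai-ui-message-stream', 'x-accel-buffering', ...) for a
// hand-rolled streaming response, e.g. new Response(body, { headers }).
const headers = new Headers(UI_MESSAGE_STREAM_HEADERS);
console.log(Object.fromEntries(headers.entries()));
```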
3581
|
-
/**
|
3582
|
-
* @deprecated Use `customProvider` instead.
|
3583
|
-
*/
|
3584
|
-
declare const experimental_customProvider: typeof customProvider;
|
3585
|
-
type ExtractModelId<MODELS extends Record<string, unknown>> = Extract<keyof MODELS, string>;
|
3586
|
-
|
3587
|
-
declare const symbol: unique symbol;
|
3588
|
-
declare class NoSuchProviderError extends NoSuchModelError {
|
3589
|
-
private readonly [symbol];
|
3590
|
-
readonly providerId: string;
|
3591
|
-
readonly availableProviders: string[];
|
3592
|
-
constructor({ modelId, modelType, providerId, availableProviders, message, }: {
|
3593
|
-
modelId: string;
|
3594
|
-
modelType: 'languageModel' | 'textEmbeddingModel' | 'imageModel' | 'transcriptionModel' | 'speechModel';
|
3595
|
-
providerId: string;
|
3596
|
-
availableProviders: string[];
|
3597
|
-
message?: string;
|
3598
|
-
});
|
3599
|
-
static isInstance(error: unknown): error is NoSuchProviderError;
|
3600
|
-
}
|
3601
|
-
|
3602
|
-
type ExtractLiteralUnion<T> = T extends string ? string extends T ? never : T : never;
|
3603
|
-
interface ProviderRegistryProvider<PROVIDERS extends Record<string, ProviderV2> = Record<string, ProviderV2>, SEPARATOR extends string = ':'> {
|
3604
|
-
languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['languageModel']>>[0]>}` : never): LanguageModelV2;
|
3605
|
-
languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): LanguageModelV2;
|
3606
|
-
textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['textEmbeddingModel']>>[0]>}` : never): EmbeddingModelV2<string>;
|
3607
|
-
textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): EmbeddingModelV2<string>;
|
3608
|
-
imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['imageModel']>>[0]>}` : never): ImageModelV2;
|
3609
|
-
imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): ImageModelV2;
|
3610
|
-
transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['transcriptionModel']>>[0]>}` : never): TranscriptionModelV2;
|
3611
|
-
transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): TranscriptionModelV2;
|
3612
|
-
speechModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['speechModel']>>[0]>}` : never): SpeechModelV2;
|
3613
|
-
speechModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): SpeechModelV2;
|
3614
|
-
}
|
3615
|
-
/**
|
3616
|
-
* Creates a registry for the given providers.
|
3617
|
-
*/
|
3618
|
-
declare function createProviderRegistry<PROVIDERS extends Record<string, ProviderV2>, SEPARATOR extends string = ':'>(providers: PROVIDERS, { separator, }?: {
|
3619
|
-
separator?: SEPARATOR;
|
3620
|
-
}): ProviderRegistryProvider<PROVIDERS, SEPARATOR>;
|
3621
|
-
/**
|
3622
|
-
* @deprecated Use `createProviderRegistry` instead.
|
3623
|
-
*/
|
3624
|
-
declare const experimental_createProviderRegistry: typeof createProviderRegistry;
|
3625
3581
|
|
3626
|
-
|
3627
|
-
|
3628
|
-
|
3629
|
-
|
3630
|
-
|
3631
|
-
|
3632
|
-
|
3633
|
-
|
3634
|
-
|
3635
|
-
|
3636
|
-
|
3637
|
-
|
3638
|
-
|
3639
|
-
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3640
|
-
}, z.core.$loose>;
|
3641
|
-
}, z.core.$strict>;
|
3642
|
-
type JSONRPCResponse = z.infer<typeof JSONRPCResponseSchema>;
|
3643
|
-
declare const JSONRPCErrorSchema: z.ZodObject<{
|
3644
|
-
jsonrpc: z.ZodLiteral<"2.0">;
|
3645
|
-
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
3646
|
-
error: z.ZodObject<{
|
3647
|
-
code: z.ZodNumber;
|
3648
|
-
message: z.ZodString;
|
3649
|
-
data: z.ZodOptional<z.ZodUnknown>;
|
3650
|
-
}, z.core.$strip>;
|
3651
|
-
}, z.core.$strict>;
|
3652
|
-
type JSONRPCError = z.infer<typeof JSONRPCErrorSchema>;
|
3653
|
-
declare const JSONRPCNotificationSchema: z.ZodObject<{
|
3654
|
-
jsonrpc: z.ZodLiteral<"2.0">;
|
3655
|
-
method: z.ZodString;
|
3656
|
-
params: z.ZodOptional<z.ZodObject<{
|
3657
|
-
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3658
|
-
}, z.core.$loose>>;
|
3659
|
-
}, z.core.$strict>;
|
3660
|
-
type JSONRPCNotification = z.infer<typeof JSONRPCNotificationSchema>;
|
3661
|
-
declare const JSONRPCMessageSchema: z.ZodUnion<readonly [z.ZodObject<{
|
3662
|
-
jsonrpc: z.ZodLiteral<"2.0">;
|
3663
|
-
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
3664
|
-
method: z.ZodString;
|
3665
|
-
params: z.ZodOptional<z.ZodObject<{
|
3666
|
-
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3667
|
-
}, z.core.$loose>>;
|
3668
|
-
}, z.core.$strict>, z.ZodObject<{
|
3669
|
-
jsonrpc: z.ZodLiteral<"2.0">;
|
3670
|
-
method: z.ZodString;
|
3671
|
-
params: z.ZodOptional<z.ZodObject<{
|
3672
|
-
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3673
|
-
}, z.core.$loose>>;
|
3674
|
-
}, z.core.$strict>, z.ZodObject<{
|
3675
|
-
jsonrpc: z.ZodLiteral<"2.0">;
|
3676
|
-
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
3677
|
-
result: z.ZodObject<{
|
3678
|
-
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3679
|
-
}, z.core.$loose>;
|
3680
|
-
}, z.core.$strict>, z.ZodObject<{
|
3681
|
-
jsonrpc: z.ZodLiteral<"2.0">;
|
3682
|
-
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
3683
|
-
error: z.ZodObject<{
|
3684
|
-
code: z.ZodNumber;
|
3685
|
-
message: z.ZodString;
|
3686
|
-
data: z.ZodOptional<z.ZodUnknown>;
|
3687
|
-
}, z.core.$strip>;
|
3688
|
-
}, z.core.$strict>]>;
|
3689
|
-
type JSONRPCMessage = z.infer<typeof JSONRPCMessageSchema>;
|
+ interface ChatTransport<UI_MESSAGE extends UIMessage> {
+ sendMessages: (options: {
+ chatId: string;
+ messages: UI_MESSAGE[];
+ abortSignal: AbortSignal | undefined;
+ } & {
+ trigger: 'submit-user-message' | 'submit-tool-result' | 'regenerate-assistant-message';
+ messageId: string | undefined;
+ } & ChatRequestOptions) => Promise<ReadableStream<UIMessageChunk>>;
+ reconnectToStream: (options: {
+ chatId: string;
+ } & ChatRequestOptions) => Promise<ReadableStream<UIMessageChunk> | null>;
+ }
 
-
-
-
-
-
+ type CreateUIMessage<UI_MESSAGE extends UIMessage> = Omit<UI_MESSAGE, 'id' | 'role'> & {
+ id?: UI_MESSAGE['id'];
+ role?: UI_MESSAGE['role'];
+ };
+ type UIDataPartSchemas = Record<string, Validator<any> | StandardSchemaV1<any>>;
+ type UIDataTypesToSchemas<T extends UIDataTypes> = {
+ [K in keyof T]: Validator<T[K]> | StandardSchemaV1<T[K]>;
+ };
+ type InferUIDataParts<T extends UIDataPartSchemas> = {
+ [K in keyof T]: T[K] extends Validator<infer U> ? U : T[K] extends StandardSchemaV1<infer U> ? U : unknown;
+ };
+ type ChatRequestOptions = {
  /**
-
+ Additional headers that should be to be passed to the API endpoint.
  */
-
+ headers?: Record<string, string> | Headers;
  /**
-
- * @param message The JSON-RPC message to send
+ Additional body JSON properties that should be sent to the API endpoint.
  */
-
+ body?: object;
+ metadata?: unknown;
+ };
+ type ChatStatus = 'submitted' | 'streaming' | 'ready' | 'error';
+ interface ChatState<UI_MESSAGE extends UIMessage> {
+ status: ChatStatus;
+ error: Error | undefined;
+ messages: UI_MESSAGE[];
+ pushMessage: (message: UI_MESSAGE) => void;
+ popMessage: () => void;
+ replaceMessage: (index: number, message: UI_MESSAGE) => void;
+ snapshot: <T>(thing: T) => T;
+ }
+ type ChatOnErrorCallback = (error: Error) => void;
+ type ChatOnToolCallCallback = ({ toolCall, }: {
+ toolCall: ToolCall<string, unknown>;
+ }) => void | Promise<unknown> | unknown;
+ type ChatOnDataCallback<UI_MESSAGE extends UIMessage> = (dataPart: DataUIPart<InferUIMessageData<UI_MESSAGE>>) => void;
+ type ChatOnFinishCallback<UI_MESSAGE extends UIMessage> = (options: {
+ message: UI_MESSAGE;
+ }) => void;
+ interface ChatInit<UI_MESSAGE extends UIMessage> {
  /**
- *
+ * A unique identifier for the chat. If not provided, a random one will be
+ * generated.
  */
-
+ id?: string;
+ messageMetadataSchema?: Validator<InferUIMessageMetadata<UI_MESSAGE>> | StandardSchemaV1<InferUIMessageMetadata<UI_MESSAGE>>;
+ dataPartSchemas?: UIDataTypesToSchemas<InferUIMessageData<UI_MESSAGE>>;
+ messages?: UI_MESSAGE[];
  /**
- *
+ * A way to provide a function that is going to be used for ids for messages and the chat.
+ * If not provided the default AI SDK `generateId` is used.
  */
-
+ generateId?: IdGenerator;
+ transport?: ChatTransport<UI_MESSAGE>;
+ maxSteps?: number;
  /**
- *
+ * Callback function to be called when an error is encountered.
  */
-
+ onError?: ChatOnErrorCallback;
  /**
-
+ Optional callback function that is invoked when a tool call is received.
+ Intended for automatic client-side tool execution.
+
+ You can optionally return a result for the tool call,
+ either synchronously or asynchronously.
+ */
+ onToolCall?: ChatOnToolCallCallback;
+ /**
+ * Optional callback function that is called when the assistant message is finished streaming.
+ *
+ * @param message The message that was streamed.
  */
-
+ onFinish?: ChatOnFinishCallback<UI_MESSAGE>;
+ /**
+ * Optional callback function that is called when a data part is received.
+ *
+ * @param data The data part that was received.
+ */
+ onData?: ChatOnDataCallback<UI_MESSAGE>;
  }
-
-
+ declare abstract class AbstractChat<UI_MESSAGE extends UIMessage> {
+ readonly id: string;
+ readonly generateId: IdGenerator;
+ protected state: ChatState<UI_MESSAGE>;
+ private messageMetadataSchema;
+ private dataPartSchemas;
+ private readonly transport;
+ private maxSteps;
+ private onError?;
+ private onToolCall?;
+ private onFinish?;
+ private onData?;
+ private activeResponse;
+ private jobExecutor;
+ constructor({ generateId, id, transport, maxSteps, messageMetadataSchema, dataPartSchemas, state, onError, onToolCall, onFinish, onData, }: Omit<ChatInit<UI_MESSAGE>, 'messages'> & {
+ state: ChatState<UI_MESSAGE>;
+ });
  /**
- *
+ * Hook status:
+ *
+ * - `submitted`: The message has been sent to the API and we're awaiting the start of the response stream.
+ * - `streaming`: The response is actively streaming in from the API, receiving chunks of data.
+ * - `ready`: The full response has been received and processed; a new user message can be submitted.
+ * - `error`: An error occurred during the API request, preventing successful completion.
  */
-
+ get status(): ChatStatus;
+ protected setStatus({ status, error, }: {
+ status: ChatStatus;
+ error?: Error;
+ }): void;
+ get error(): Error | undefined;
+ get messages(): UI_MESSAGE[];
+ get lastMessage(): UI_MESSAGE | undefined;
+ set messages(messages: UI_MESSAGE[]);
  /**
- *
+ * Appends or replaces a user message to the chat list. This triggers the API call to fetch
+ * the assistant's response.
+ *
+ * If a messageId is provided, the message will be replaced.
  */
3731
|
-
|
3732
|
-
|
3733
|
-
|
3734
|
-
|
3735
|
-
|
3736
|
-
|
3737
|
-
|
3738
|
-
|
3739
|
-
|
3740
|
-
|
3741
|
-
|
3742
|
-
|
3743
|
-
|
3744
|
-
|
3745
|
-
|
3746
|
-
|
3747
|
-
content: z.ZodArray<z.ZodUnion<readonly [z.ZodObject<{
|
3748
|
-
type: z.ZodLiteral<"text">;
|
3749
|
-
text: z.ZodString;
|
3750
|
-
}, z.core.$loose>, z.ZodObject<{
|
3751
|
-
type: z.ZodLiteral<"image">;
|
3752
|
-
data: z.ZodString;
|
3753
|
-
mimeType: z.ZodString;
|
3754
|
-
}, z.core.$loose>, z.ZodObject<{
|
3755
|
-
type: z.ZodLiteral<"resource">;
|
3756
|
-
resource: z.ZodUnion<readonly [z.ZodObject<{
|
3757
|
-
uri: z.ZodString;
|
3758
|
-
mimeType: z.ZodOptional<z.ZodString>;
|
3759
|
-
text: z.ZodString;
|
3760
|
-
}, z.core.$loose>, z.ZodObject<{
|
3761
|
-
uri: z.ZodString;
|
3762
|
-
mimeType: z.ZodOptional<z.ZodString>;
|
3763
|
-
blob: z.ZodString;
|
3764
|
-
}, z.core.$loose>]>;
|
3765
|
-
}, z.core.$loose>]>>;
|
3766
|
-
isError: z.ZodOptional<z.ZodDefault<z.ZodBoolean>>;
|
3767
|
-
}, z.core.$loose>, z.ZodObject<{
|
3768
|
-
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3769
|
-
toolResult: z.ZodUnknown;
|
3770
|
-
}, z.core.$loose>]>;
|
3771
|
-
type CallToolResult = z.infer<typeof CallToolResultSchema>;
|
3772
|
-
|
3773
|
-
interface MCPClientConfig {
|
3774
|
-
/** Transport configuration for connecting to the MCP server */
|
3775
|
-
transport: MCPTransportConfig | MCPTransport;
|
3776
|
-
/** Optional callback for uncaught errors */
|
3777
|
-
onUncaughtError?: (error: unknown) => void;
|
3778
|
-
/** Optional client name, defaults to 'ai-sdk-mcp-client' */
|
3779
|
-
name?: string;
|
3780
|
-
}
|
3781
|
-
declare function createMCPClient(config: MCPClientConfig): Promise<MCPClient>;
|
3782
|
-
/**
|
3783
|
-
* A lightweight MCP Client implementation
|
3784
|
-
*
|
3785
|
-
* The primary purpose of this client is tool conversion between MCP<>AI SDK
|
3786
|
-
* but can later be extended to support other MCP features
|
3787
|
-
*
|
3788
|
-
* Tool parameters are automatically inferred from the server's JSON schema
|
3789
|
-
* if not explicitly provided in the tools configuration
|
3790
|
-
*
|
3791
|
-
* This client is meant to be used to communicate with a single server. To communicate and fetch tools across multiple servers, it's recommended to create a new client instance per server.
|
3792
|
-
*
|
3793
|
-
* Not supported:
|
3794
|
-
* - Client options (e.g. sampling, roots) as they are not needed for tool conversion
|
3795
|
-
* - Accepting notifications
|
3796
|
-
* - Session management (when passing a sessionId to an instance of the Streamable HTTP transport)
|
3797
|
-
* - Resumable SSE streams
|
3798
|
-
*/
|
3799
|
-
declare class MCPClient {
|
3800
|
-
private transport;
|
3801
|
-
private onUncaughtError?;
|
3802
|
-
private clientInfo;
|
3803
|
-
private requestMessageId;
|
3804
|
-
private responseHandlers;
|
3805
|
-
private serverCapabilities;
|
3806
|
-
private isClosed;
|
3807
|
-
constructor({ transport: transportConfig, name, onUncaughtError, }: MCPClientConfig);
|
3808
|
-
init(): Promise<this>;
|
3809
|
-
close(): Promise<void>;
|
3810
|
-
private assertCapability;
|
3811
|
-
private request;
|
3812
|
-
private listTools;
|
3813
|
-
private callTool;
|
3814
|
-
private notification;
|
3717
|
+
sendMessage: (message: (CreateUIMessage<UI_MESSAGE> & {
|
3718
|
+
text?: never;
|
3719
|
+
files?: never;
|
3720
|
+
messageId?: string;
|
3721
|
+
}) | {
|
3722
|
+
text: string;
|
3723
|
+
files?: FileList | FileUIPart[];
|
3724
|
+
metadata?: InferUIMessageMetadata<UI_MESSAGE>;
|
3725
|
+
parts?: never;
|
3726
|
+
messageId?: string;
|
3727
|
+
} | {
|
3728
|
+
files: FileList | FileUIPart[];
|
3729
|
+
metadata?: InferUIMessageMetadata<UI_MESSAGE>;
|
3730
|
+
parts?: never;
|
3731
|
+
messageId?: string;
|
3732
|
+
}, options?: ChatRequestOptions) => Promise<void>;
|
3815
3733
|
/**
|
3816
|
-
*
|
3817
|
-
*
|
3734
|
+
* Regenerate the assistant message with the provided message id.
|
3735
|
+
* If no message id is provided, the last assistant message will be regenerated.
|
3818
3736
|
*/
|
3819
|
-
|
3820
|
-
|
3821
|
-
})
|
3822
|
-
|
3823
|
-
|
3824
|
-
|
3737
|
+
regenerate: ({ messageId, ...options }?: {
|
3738
|
+
messageId?: string;
|
3739
|
+
} & ChatRequestOptions) => Promise<void>;
|
3740
|
+
/**
|
3741
|
+
* Attempt to resume an ongoing streaming response.
|
3742
|
+
*/
|
3743
|
+
resumeStream: (options?: ChatRequestOptions) => Promise<void>;
|
3744
|
+
addToolResult: ({ toolCallId, output, }: {
|
3745
|
+
toolCallId: string;
|
3746
|
+
output: unknown;
|
3747
|
+
}) => Promise<void>;
|
3748
|
+
/**
|
3749
|
+
* Abort the current request immediately, keep the generated tokens if any.
|
3750
|
+
*/
|
3751
|
+
stop: () => Promise<void>;
|
3752
|
+
private makeRequest;
|
3825
3753
|
}
|
3826
3754
|
|
3755
|
+
declare function convertFileListToFileUIParts(files: FileList | undefined): Promise<Array<FileUIPart>>;
|
3756
|
+
|
3827
3757
|
/**
|
3828
|
-
|
3829
|
-
|
3758
|
+
Converts an array of messages from useChat into an array of CoreMessages that can be used
|
3759
|
+
with the AI core functions (e.g. `streamText`).
|
3760
|
+
|
3761
|
+
@param messages - The messages to convert.
|
3762
|
+
@param options.tools - The tools to use.
|
3763
|
+
@param options.ignoreIncompleteToolCalls - Whether to ignore incomplete tool calls. Default is `false`.
|
3830
3764
|
*/
|
3831
|
-
|
3765
|
+
declare function convertToModelMessages(messages: Array<Omit<UIMessage, 'id'>>, options?: {
|
3766
|
+
tools?: ToolSet;
|
3767
|
+
ignoreIncompleteToolCalls?: boolean;
|
3768
|
+
}): ModelMessage[];
|
3769
|
+
/**
|
3770
|
+
@deprecated Use `convertToModelMessages` instead.
|
3771
|
+
*/
|
3772
|
+
declare const convertToCoreMessages: typeof convertToModelMessages;
|
3773
|
+
|
3774
|
+
type PrepareSendMessagesRequest<UI_MESSAGE extends UIMessage> = (options: {
|
3775
|
+
id: string;
|
3776
|
+
messages: UI_MESSAGE[];
|
3777
|
+
requestMetadata: unknown;
|
3778
|
+
body: Record<string, any> | undefined;
|
3779
|
+
credentials: RequestCredentials | undefined;
|
3780
|
+
headers: HeadersInit | undefined;
|
3781
|
+
api: string;
|
3782
|
+
} & {
|
3783
|
+
trigger: 'submit-user-message' | 'submit-tool-result' | 'regenerate-assistant-message';
|
3784
|
+
messageId: string | undefined;
|
3785
|
+
}) => {
|
3786
|
+
body: object;
|
3787
|
+
headers?: HeadersInit;
|
3788
|
+
credentials?: RequestCredentials;
|
3789
|
+
api?: string;
|
3790
|
+
} | PromiseLike<{
|
3791
|
+
body: object;
|
3792
|
+
headers?: HeadersInit;
|
3793
|
+
credentials?: RequestCredentials;
|
3794
|
+
api?: string;
|
3795
|
+
}>;
|
3796
|
+
type PrepareReconnectToStreamRequest = (options: {
|
3797
|
+
id: string;
|
3798
|
+
requestMetadata: unknown;
|
3799
|
+
body: Record<string, any> | undefined;
|
3800
|
+
credentials: RequestCredentials | undefined;
|
3801
|
+
headers: HeadersInit | undefined;
|
3802
|
+
api: string;
|
3803
|
+
}) => {
|
3804
|
+
headers?: HeadersInit;
|
3805
|
+
credentials?: RequestCredentials;
|
3806
|
+
api?: string;
|
3807
|
+
} | PromiseLike<{
|
3808
|
+
headers?: HeadersInit;
|
3809
|
+
credentials?: RequestCredentials;
|
3810
|
+
api?: string;
|
3811
|
+
}>;
|
3812
|
+
/**
|
3813
|
+
* Options for the `HttpChatTransport` class.
|
3814
|
+
*
|
3815
|
+
* @param UI_MESSAGE - The type of message to be used in the chat.
|
3816
|
+
*/
|
3817
|
+
type HttpChatTransportInitOptions<UI_MESSAGE extends UIMessage> = {
|
3832
3818
|
/**
|
3833
|
-
* The
|
3819
|
+
* The API URL to be used for the chat transport.
|
3820
|
+
* Defaults to '/api/chat'.
|
3834
3821
|
*/
|
3835
|
-
|
3822
|
+
api?: string;
|
3836
3823
|
/**
|
3837
|
-
*
|
3838
|
-
*
|
3824
|
+
* The credentials mode to be used for the fetch request.
|
3825
|
+
* Possible values are: 'omit', 'same-origin', 'include'.
|
3826
|
+
* Defaults to 'same-origin'.
|
3839
3827
|
*/
|
3840
|
-
|
3841
|
-
/**
|
3842
|
-
* The text content of this segment.
|
3843
|
-
*/
|
3844
|
-
readonly text: string;
|
3845
|
-
/**
|
3846
|
-
* The start time of this segment in seconds.
|
3847
|
-
*/
|
3848
|
-
readonly startSecond: number;
|
3849
|
-
/**
|
3850
|
-
* The end time of this segment in seconds.
|
3851
|
-
*/
|
3852
|
-
readonly endSecond: number;
|
3853
|
-
}>;
|
3828
|
+
credentials?: Resolvable<RequestCredentials>;
|
3854
3829
|
/**
|
3855
|
-
*
|
3856
|
-
* May be undefined if the language couldn't be detected.
|
3830
|
+
* HTTP headers to be sent with the API request.
|
3857
3831
|
*/
|
3858
|
-
|
3832
|
+
headers?: Resolvable<Record<string, string> | Headers>;
|
3859
3833
|
/**
|
3860
|
-
*
|
3861
|
-
*
|
3834
|
+
* Extra body object to be sent with the API request.
|
3835
|
+
* @example
|
3836
|
+
* Send a `sessionId` to the API along with the messages.
|
3837
|
+
* ```js
|
3838
|
+
* useChat({
|
3839
|
+
* body: {
|
3840
|
+
* sessionId: '123',
|
3841
|
+
* }
|
3842
|
+
* })
|
3843
|
+
* ```
|
3862
3844
|
*/
|
3863
|
-
|
3845
|
+
body?: Resolvable<object>;
|
3864
3846
|
/**
|
3865
|
-
|
3866
|
-
|
3867
|
-
|
3847
|
+
Custom fetch implementation. You can use it as a middleware to intercept requests,
|
3848
|
+
or to provide a custom fetch implementation for e.g. testing.
|
3849
|
+
*/
|
3850
|
+
fetch?: FetchFunction;
|
3868
3851
|
/**
|
3869
|
-
|
3852
|
+
* When a function is provided, it will be used
|
3853
|
+
* to prepare the request body for the chat API. This can be useful for
|
3854
|
+
* customizing the request body based on the messages and data in the chat.
|
3855
|
+
*
|
3856
|
+
* @param id The id of the chat.
|
3857
|
+
* @param messages The current messages in the chat.
|
3858
|
+
* @param requestBody The request body object passed in the chat request.
|
3870
3859
|
*/
|
3871
|
-
|
3860
|
+
prepareSendMessagesRequest?: PrepareSendMessagesRequest<UI_MESSAGE>;
|
3872
3861
|
/**
|
3873
|
-
|
3862
|
+
* When a function is provided, it will be used
|
3863
|
+
* to prepare the request body for the chat API. This can be useful for
|
3864
|
+
* customizing the request body based on the messages and data in the chat.
|
3865
|
+
*
|
3866
|
+
* @param id The id of the chat.
|
3867
|
+
* @param messages The current messages in the chat.
|
3868
|
+
* @param requestBody The request body object passed in the chat request.
|
3874
3869
|
*/
|
3875
|
-
|
3870
|
+
prepareReconnectToStreamRequest?: PrepareReconnectToStreamRequest;
|
3871
|
+
};
|
3872
|
+
declare abstract class HttpChatTransport<UI_MESSAGE extends UIMessage> implements ChatTransport<UI_MESSAGE> {
|
3873
|
+
protected api: string;
|
3874
|
+
protected credentials: HttpChatTransportInitOptions<UI_MESSAGE>['credentials'];
|
3875
|
+
protected headers: HttpChatTransportInitOptions<UI_MESSAGE>['headers'];
|
3876
|
+
protected body: HttpChatTransportInitOptions<UI_MESSAGE>['body'];
|
3877
|
+
protected fetch?: FetchFunction;
|
3878
|
+
protected prepareSendMessagesRequest?: PrepareSendMessagesRequest<UI_MESSAGE>;
|
3879
|
+
protected prepareReconnectToStreamRequest?: PrepareReconnectToStreamRequest;
|
3880
|
+
constructor({ api, credentials, headers, body, fetch, prepareSendMessagesRequest, prepareReconnectToStreamRequest, }: HttpChatTransportInitOptions<UI_MESSAGE>);
|
3881
|
+
sendMessages({ abortSignal, ...options }: Parameters<ChatTransport<UI_MESSAGE>['sendMessages']>[0]): Promise<ReadableStream<UIMessageChunk>>;
|
3882
|
+
reconnectToStream(options: Parameters<ChatTransport<UI_MESSAGE>['reconnectToStream']>[0]): Promise<ReadableStream<UIMessageChunk> | null>;
|
3883
|
+
protected abstract processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
|
3876
3884
|
}
|
3877
3885
|
|
3878
|
-
|
3879
|
-
|
3886
|
+
declare class DefaultChatTransport<UI_MESSAGE extends UIMessage> extends HttpChatTransport<UI_MESSAGE> {
|
3887
|
+
constructor(options?: HttpChatTransportInitOptions<UI_MESSAGE>);
|
3888
|
+
protected processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
|
3889
|
+
}
|
3880
3890
|
|
3881
|
-
|
3882
|
-
|
3883
|
-
|
3884
|
-
|
3885
|
-
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
|
3886
|
-
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
3887
|
-
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
3891
|
+
declare class TextStreamChatTransport<UI_MESSAGE extends UIMessage> extends HttpChatTransport<UI_MESSAGE> {
|
3892
|
+
constructor(options?: HttpChatTransportInitOptions<UI_MESSAGE>);
|
3893
|
+
protected processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
|
3894
|
+
}
|
3888
3895
|
|
3889
|
-
|
3890
|
-
*/
|
3891
|
-
declare function transcribe({ model, audio, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
|
3896
|
+
type CompletionRequestOptions = {
|
3892
3897
|
/**
|
3893
|
-
|
3898
|
+
An optional object of headers to be passed to the API endpoint.
|
3899
|
+
*/
|
3900
|
+
headers?: Record<string, string> | Headers;
|
3901
|
+
/**
|
3902
|
+
An optional object to be passed to the API endpoint.
|
3894
3903
|
*/
|
3895
|
-
|
3904
|
+
body?: object;
|
3905
|
+
};
|
3906
|
+
type UseCompletionOptions = {
|
3896
3907
|
/**
|
3897
|
-
|
3908
|
+
* The API endpoint that accepts a `{ prompt: string }` object and returns
|
3909
|
+
* a stream of tokens of the AI completion response. Defaults to `/api/completion`.
|
3898
3910
|
*/
|
3899
|
-
|
3911
|
+
api?: string;
|
3900
3912
|
/**
|
3901
|
-
|
3902
|
-
|
3903
|
-
|
3904
|
-
|
3905
|
-
|
3906
|
-
```ts
|
3907
|
-
{
|
3908
|
-
"openai": {
|
3909
|
-
"temperature": 0
|
3910
|
-
}
|
3911
|
-
}
|
3912
|
-
```
|
3913
|
-
*/
|
3914
|
-
providerOptions?: ProviderOptions;
|
3913
|
+
* An unique identifier for the chat. If not provided, a random one will be
|
3914
|
+
* generated. When provided, the `useChat` hook with the same `id` will
|
3915
|
+
* have shared states across components.
|
3916
|
+
*/
|
3917
|
+
id?: string;
|
3915
3918
|
/**
|
3916
|
-
|
3917
|
-
|
3918
|
-
@default 2
|
3919
|
+
* Initial prompt input of the completion.
|
3919
3920
|
*/
|
3920
|
-
|
3921
|
+
initialInput?: string;
|
3921
3922
|
/**
|
3922
|
-
|
3923
|
-
|
3924
|
-
|
3923
|
+
* Initial completion result. Useful to load an existing history.
|
3924
|
+
*/
|
3925
|
+
initialCompletion?: string;
|
3925
3926
|
/**
|
3926
|
-
|
3927
|
-
|
3928
|
-
|
3929
|
-
|
3930
|
-
|
3927
|
+
* Callback function to be called when the completion is finished streaming.
|
3928
|
+
*/
|
3929
|
+
onFinish?: (prompt: string, completion: string) => void;
|
3930
|
+
/**
|
3931
|
+
* Callback function to be called when an error is encountered.
|
3932
|
+
*/
|
3933
|
+
onError?: (error: Error) => void;
|
3934
|
+
/**
|
3935
|
+
* The credentials mode to be used for the fetch request.
|
3936
|
+
* Possible values are: 'omit', 'same-origin', 'include'.
|
3937
|
+
* Defaults to 'same-origin'.
|
3938
|
+
*/
|
3939
|
+
credentials?: RequestCredentials;
|
3940
|
+
/**
|
3941
|
+
* HTTP headers to be sent with the API request.
|
3942
|
+
*/
|
3943
|
+
headers?: Record<string, string> | Headers;
|
3944
|
+
/**
|
3945
|
+
* Extra body object to be sent with the API request.
|
3946
|
+
* @example
|
3947
|
+
* Send a `sessionId` to the API along with the prompt.
|
3948
|
+
* ```js
|
3949
|
+
* useChat({
|
3950
|
+
* body: {
|
3951
|
+
* sessionId: '123',
|
3952
|
+
* }
|
3953
|
+
* })
|
3954
|
+
* ```
|
3955
|
+
*/
|
3956
|
+
body?: object;
|
3957
|
+
/**
|
3958
|
+
Streaming protocol that is used. Defaults to `data`.
|
3959
|
+
*/
|
3960
|
+
streamProtocol?: 'data' | 'text';
|
3961
|
+
/**
|
3962
|
+
Custom fetch implementation. You can use it as a middleware to intercept requests,
|
3963
|
+
or to provide a custom fetch implementation for e.g. testing.
|
3964
|
+
*/
|
3965
|
+
fetch?: FetchFunction;
|
3966
|
+
};
|
3931
3967
|
|
3932
3968
|
declare global {
|
3933
3969
|
var AI_SDK_DEFAULT_PROVIDER: ProviderV2 | undefined;
|
3934
3970
|
}
|
3935
3971
|
|
3936
|
-
export { AbstractChat, CallSettings, CallWarning, ChatInit, ChatOnDataCallback, ChatOnErrorCallback, ChatOnFinishCallback, ChatOnToolCallCallback, ChatRequestOptions, ChatState, ChatStatus, ChatTransport, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataUIPart, DeepPartial, DefaultChatTransport, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, ErrorHandler, Agent as Experimental_Agent, AgentSettings as Experimental_AgentSettings, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, HttpChatTransport, HttpChatTransportInitOptions, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, InferUIDataParts, InferUITool, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolInputError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, PrepareReconnectToStreamRequest, PrepareSendMessagesRequest, PrepareStepFunction, PrepareStepResult, Prompt, Provider, ProviderMetadata, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RetryError, SerialJobExecutor, SourceUrlUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextStreamChatTransport, TextStreamPart, TextUIPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolErrorUnion, ToolResultUnion, ToolSet, ToolUIPart, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIDataPartSchemas, UIDataTypes, UIMessage, UIMessageChunk, UIMessagePart, UIMessageStreamOptions, UIMessageStreamWriter, UITool, UITools, UI_MESSAGE_STREAM_HEADERS, UseCompletionOptions, assistantModelMessageSchema, callCompletionApi, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractReasoningMiddleware, generateObject, generateText, getTextFromDataUrl, getToolName, hasToolCall, isDeepEqualData, isToolUIPart, modelMessageSchema, parsePartialJson, pipeTextStreamToResponse, 
pipeUIMessageStreamToResponse, readUIMessageStream, simulateReadableStream, simulateStreamingMiddleware, smoothStream, stepCountIs, streamObject, streamText, systemModelMessageSchema, toolModelMessageSchema, userModelMessageSchema, wrapLanguageModel };
|
3972
|
+
export { AbstractChat, CallSettings, CallWarning, ChatInit, ChatOnDataCallback, ChatOnErrorCallback, ChatOnFinishCallback, ChatOnToolCallCallback, ChatRequestOptions, ChatState, ChatStatus, ChatTransport, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataUIPart, DeepPartial, DefaultChatTransport, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, ErrorHandler, Agent as Experimental_Agent, AgentSettings as Experimental_AgentSettings, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, HttpChatTransport, HttpChatTransportInitOptions, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, InferUIDataParts, InferUITool, InferUITools, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolInputError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, PrepareReconnectToStreamRequest, PrepareSendMessagesRequest, PrepareStepFunction, PrepareStepResult, Prompt, Provider, ProviderMetadata, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RetryError, SerialJobExecutor, SourceUrlUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextStreamChatTransport, TextStreamPart, TextUIPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolErrorUnion, ToolResultUnion, ToolSet, ToolUIPart, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIDataPartSchemas, UIDataTypes, UIMessage, UIMessageChunk, UIMessagePart, UIMessageStreamOptions, UIMessageStreamWriter, UITool, UITools, UI_MESSAGE_STREAM_HEADERS, UseCompletionOptions, assistantModelMessageSchema, callCompletionApi, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractReasoningMiddleware, generateObject, generateText, getTextFromDataUrl, getToolName, hasToolCall, isDeepEqualData, isToolUIPart, modelMessageSchema, parsePartialJson, pipeTextStreamToResponse, 
pipeUIMessageStreamToResponse, readUIMessageStream, simulateReadableStream, simulateStreamingMiddleware, smoothStream, stepCountIs, streamObject, streamText, systemModelMessageSchema, toolModelMessageSchema, userModelMessageSchema, wrapLanguageModel };
|