ai 5.0.0-beta.10 → 5.0.0-beta.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +14 -0
- package/dist/index.d.mts +1704 -1669
- package/dist/index.d.ts +1704 -1669
- package/dist/index.js +3928 -3917
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +3941 -3930
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +17 -17
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +17 -17
- package/dist/internal/index.mjs.map +1 -1
- package/dist/mcp-stdio/index.js +3 -3
- package/dist/mcp-stdio/index.js.map +1 -1
- package/dist/mcp-stdio/index.mjs +3 -3
- package/dist/mcp-stdio/index.mjs.map +1 -1
- package/dist/test/index.js +4 -4
- package/dist/test/index.js.map +1 -1
- package/dist/test/index.mjs +4 -4
- package/dist/test/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.d.mts
CHANGED
@@ -1,4 +1,4 @@
|
|
1
|
-
import { ModelMessage, Tool, InferToolInput, InferToolOutput, AssistantModelMessage, ToolModelMessage, ReasoningPart, Schema, SystemModelMessage, UserModelMessage, ProviderOptions, IdGenerator,
|
1
|
+
import { ModelMessage, Tool, InferToolInput, InferToolOutput, AssistantModelMessage, ToolModelMessage, ReasoningPart, Schema, SystemModelMessage, UserModelMessage, ProviderOptions, IdGenerator, InferSchema, FlexibleSchema, DataContent, Validator, StandardSchemaV1, ToolCall, Resolvable, FetchFunction } from '@ai-sdk/provider-utils';
|
2
2
|
export { AssistantContent, AssistantModelMessage, DataContent, FilePart, IdGenerator, ImagePart, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, generateId, jsonSchema, tool } from '@ai-sdk/provider-utils';
|
3
3
|
import { AttributeValue, Tracer } from '@opentelemetry/api';
|
4
4
|
import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV2, ImageModelV2CallWarning, ImageModelV2ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2CallWarning, LanguageModelV2Source, SharedV2ProviderMetadata, SpeechModelV2, SpeechModelV2CallWarning, TranscriptionModelV2, TranscriptionModelV2CallWarning, LanguageModelV2Usage, LanguageModelV2CallOptions, AISDKError, LanguageModelV2ToolCall, JSONSchema7, JSONParseError, TypeValidationError, LanguageModelV2Middleware, ProviderV2, NoSuchModelError, JSONObject } from '@ai-sdk/provider';
|
@@ -717,12 +717,14 @@ type PrepareStepFunction<TOOLS extends Record<string, Tool> = Record<string, Too
|
|
717
717
|
steps: Array<StepResult<NoInfer<TOOLS>>>;
|
718
718
|
stepNumber: number;
|
719
719
|
model: LanguageModel;
|
720
|
+
messages: Array<ModelMessage>;
|
720
721
|
}) => PromiseLike<PrepareStepResult<TOOLS>> | PrepareStepResult<TOOLS>;
|
721
722
|
type PrepareStepResult<TOOLS extends Record<string, Tool> = Record<string, Tool>> = {
|
722
723
|
model?: LanguageModel;
|
723
724
|
toolChoice?: ToolChoice<NoInfer<TOOLS>>;
|
724
725
|
activeTools?: Array<keyof NoInfer<TOOLS>>;
|
725
726
|
system?: string;
|
727
|
+
messages?: Array<ModelMessage>;
|
726
728
|
} | undefined;
|
727
729
|
|
728
730
|
type StopCondition<TOOLS extends ToolSet> = (options: {
|
@@ -948,6 +950,236 @@ A function that attempts to repair a tool call that failed to parse.
|
|
948
950
|
};
|
949
951
|
}): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
|
950
952
|
|
953
|
+
/**
|
954
|
+
* Detects the first chunk in a buffer.
|
955
|
+
*
|
956
|
+
* @param buffer - The buffer to detect the first chunk in.
|
957
|
+
*
|
958
|
+
* @returns The first detected chunk, or `undefined` if no chunk was detected.
|
959
|
+
*/
|
960
|
+
type ChunkDetector = (buffer: string) => string | undefined | null;
|
961
|
+
/**
|
962
|
+
* Smooths text streaming output.
|
963
|
+
*
|
964
|
+
* @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms. Can be set to `null` to skip the delay.
|
965
|
+
* @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default), "line" to stream line by line, or provide a custom RegExp pattern for custom chunking.
|
966
|
+
*
|
967
|
+
* @returns A transform stream that smooths text streaming output.
|
968
|
+
*/
|
969
|
+
declare function smoothStream<TOOLS extends ToolSet>({ delayInMs, chunking, _internal: { delay }, }?: {
|
970
|
+
delayInMs?: number | null;
|
971
|
+
chunking?: 'word' | 'line' | RegExp | ChunkDetector;
|
972
|
+
/**
|
973
|
+
* Internal. For test use only. May change without notice.
|
974
|
+
*/
|
975
|
+
_internal?: {
|
976
|
+
delay?: (delayInMs: number | null) => Promise<void>;
|
977
|
+
};
|
978
|
+
}): (options: {
|
979
|
+
tools: TOOLS;
|
980
|
+
}) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
|
981
|
+
|
982
|
+
/**
|
983
|
+
A transformation that is applied to the stream.
|
984
|
+
|
985
|
+
@param stopStream - A function that stops the source stream.
|
986
|
+
@param tools - The tools that are accessible to and can be called by the model. The model needs to support calling tools.
|
987
|
+
*/
|
988
|
+
type StreamTextTransform<TOOLS extends ToolSet> = (options: {
|
989
|
+
tools: TOOLS;
|
990
|
+
stopStream: () => void;
|
991
|
+
}) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
|
992
|
+
/**
|
993
|
+
Callback that is set using the `onError` option.
|
994
|
+
|
995
|
+
@param event - The event that is passed to the callback.
|
996
|
+
*/
|
997
|
+
type StreamTextOnErrorCallback = (event: {
|
998
|
+
error: unknown;
|
999
|
+
}) => Promise<void> | void;
|
1000
|
+
/**
|
1001
|
+
Callback that is set using the `onStepFinish` option.
|
1002
|
+
|
1003
|
+
@param stepResult - The result of the step.
|
1004
|
+
*/
|
1005
|
+
type StreamTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => Promise<void> | void;
|
1006
|
+
/**
|
1007
|
+
Callback that is set using the `onChunk` option.
|
1008
|
+
|
1009
|
+
@param event - The event that is passed to the callback.
|
1010
|
+
*/
|
1011
|
+
type StreamTextOnChunkCallback<TOOLS extends ToolSet> = (event: {
|
1012
|
+
chunk: Extract<TextStreamPart<TOOLS>, {
|
1013
|
+
type: 'text' | 'reasoning' | 'source' | 'tool-call' | 'tool-input-start' | 'tool-input-delta' | 'tool-result' | 'raw';
|
1014
|
+
}>;
|
1015
|
+
}) => Promise<void> | void;
|
1016
|
+
/**
|
1017
|
+
Callback that is set using the `onFinish` option.
|
1018
|
+
|
1019
|
+
@param event - The event that is passed to the callback.
|
1020
|
+
*/
|
1021
|
+
type StreamTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
|
1022
|
+
/**
|
1023
|
+
Details for all steps.
|
1024
|
+
*/
|
1025
|
+
readonly steps: StepResult<TOOLS>[];
|
1026
|
+
/**
|
1027
|
+
Total usage for all steps. This is the sum of the usage of all steps.
|
1028
|
+
*/
|
1029
|
+
readonly totalUsage: LanguageModelUsage;
|
1030
|
+
}) => Promise<void> | void;
|
1031
|
+
/**
|
1032
|
+
Generate a text and call tools for a given prompt using a language model.
|
1033
|
+
|
1034
|
+
This function streams the output. If you do not want to stream the output, use `generateText` instead.
|
1035
|
+
|
1036
|
+
@param model - The language model to use.
|
1037
|
+
@param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
|
1038
|
+
|
1039
|
+
@param system - A system message that will be part of the prompt.
|
1040
|
+
@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
|
1041
|
+
@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
|
1042
|
+
|
1043
|
+
@param maxOutputTokens - Maximum number of tokens to generate.
|
1044
|
+
@param temperature - Temperature setting.
|
1045
|
+
The value is passed through to the provider. The range depends on the provider and model.
|
1046
|
+
It is recommended to set either `temperature` or `topP`, but not both.
|
1047
|
+
@param topP - Nucleus sampling.
|
1048
|
+
The value is passed through to the provider. The range depends on the provider and model.
|
1049
|
+
It is recommended to set either `temperature` or `topP`, but not both.
|
1050
|
+
@param topK - Only sample from the top K options for each subsequent token.
|
1051
|
+
Used to remove "long tail" low probability responses.
|
1052
|
+
Recommended for advanced use cases only. You usually only need to use temperature.
|
1053
|
+
@param presencePenalty - Presence penalty setting.
|
1054
|
+
It affects the likelihood of the model to repeat information that is already in the prompt.
|
1055
|
+
The value is passed through to the provider. The range depends on the provider and model.
|
1056
|
+
@param frequencyPenalty - Frequency penalty setting.
|
1057
|
+
It affects the likelihood of the model to repeatedly use the same words or phrases.
|
1058
|
+
The value is passed through to the provider. The range depends on the provider and model.
|
1059
|
+
@param stopSequences - Stop sequences.
|
1060
|
+
If set, the model will stop generating text when one of the stop sequences is generated.
|
1061
|
+
@param seed - The seed (integer) to use for random sampling.
|
1062
|
+
If set and supported by the model, calls will generate deterministic results.
|
1063
|
+
|
1064
|
+
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
|
1065
|
+
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
1066
|
+
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
1067
|
+
|
1068
|
+
@param maxSteps - Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
|
1069
|
+
|
1070
|
+
@param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
|
1071
|
+
@param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
|
1072
|
+
@param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
|
1073
|
+
@param onFinish - Callback that is called when the LLM response and all request tool executions
|
1074
|
+
(for tools that have an `execute` function) are finished.
|
1075
|
+
|
1076
|
+
@return
|
1077
|
+
A result object for accessing different stream types and additional information.
|
1078
|
+
*/
|
1079
|
+
declare function streamText<TOOLS extends ToolSet, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, stopWhen, experimental_output: output, experimental_telemetry: telemetry, prepareStep, providerOptions, experimental_activeTools, activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, includeRawChunks, onChunk, onError, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
|
1080
|
+
/**
|
1081
|
+
The language model to use.
|
1082
|
+
*/
|
1083
|
+
model: LanguageModel;
|
1084
|
+
/**
|
1085
|
+
The tools that the model can call. The model needs to support calling tools.
|
1086
|
+
*/
|
1087
|
+
tools?: TOOLS;
|
1088
|
+
/**
|
1089
|
+
The tool choice strategy. Default: 'auto'.
|
1090
|
+
*/
|
1091
|
+
toolChoice?: ToolChoice<TOOLS>;
|
1092
|
+
/**
|
1093
|
+
Condition for stopping the generation when there are tool results in the last step.
|
1094
|
+
When the condition is an array, any of the conditions can be met to stop the generation.
|
1095
|
+
|
1096
|
+
@default stepCountIs(1)
|
1097
|
+
*/
|
1098
|
+
stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
|
1099
|
+
/**
|
1100
|
+
Optional telemetry configuration (experimental).
|
1101
|
+
*/
|
1102
|
+
experimental_telemetry?: TelemetrySettings;
|
1103
|
+
/**
|
1104
|
+
Additional provider-specific options. They are passed through
|
1105
|
+
to the provider from the AI SDK and enable provider-specific
|
1106
|
+
functionality that can be fully encapsulated in the provider.
|
1107
|
+
*/
|
1108
|
+
providerOptions?: ProviderOptions;
|
1109
|
+
/**
|
1110
|
+
* @deprecated Use `activeTools` instead.
|
1111
|
+
*/
|
1112
|
+
experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
|
1113
|
+
/**
|
1114
|
+
Limits the tools that are available for the model to call without
|
1115
|
+
changing the tool call and result types in the result.
|
1116
|
+
*/
|
1117
|
+
activeTools?: Array<keyof NoInfer<TOOLS>>;
|
1118
|
+
/**
|
1119
|
+
Optional specification for parsing structured outputs from the LLM response.
|
1120
|
+
*/
|
1121
|
+
experimental_output?: Output<OUTPUT, PARTIAL_OUTPUT>;
|
1122
|
+
/**
|
1123
|
+
Optional function that you can use to provide different settings for a step.
|
1124
|
+
|
1125
|
+
@param options - The options for the step.
|
1126
|
+
@param options.steps - The steps that have been executed so far.
|
1127
|
+
@param options.stepNumber - The number of the step that is being executed.
|
1128
|
+
@param options.model - The model that is being used.
|
1129
|
+
|
1130
|
+
@returns An object that contains the settings for the step.
|
1131
|
+
If you return undefined (or for undefined settings), the settings from the outer level will be used.
|
1132
|
+
*/
|
1133
|
+
prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
|
1134
|
+
/**
|
1135
|
+
A function that attempts to repair a tool call that failed to parse.
|
1136
|
+
*/
|
1137
|
+
experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
|
1138
|
+
/**
|
1139
|
+
Optional stream transformations.
|
1140
|
+
They are applied in the order they are provided.
|
1141
|
+
The stream transformations must maintain the stream structure for streamText to work correctly.
|
1142
|
+
*/
|
1143
|
+
experimental_transform?: StreamTextTransform<TOOLS> | Array<StreamTextTransform<TOOLS>>;
|
1144
|
+
/**
|
1145
|
+
Whether to include raw chunks from the provider in the stream.
|
1146
|
+
When enabled, you will receive raw chunks with type 'raw' that contain the unprocessed data from the provider.
|
1147
|
+
This allows access to cutting-edge provider features not yet wrapped by the AI SDK.
|
1148
|
+
Defaults to false.
|
1149
|
+
*/
|
1150
|
+
includeRawChunks?: boolean;
|
1151
|
+
/**
|
1152
|
+
Callback that is called for each chunk of the stream.
|
1153
|
+
The stream processing will pause until the callback promise is resolved.
|
1154
|
+
*/
|
1155
|
+
onChunk?: StreamTextOnChunkCallback<TOOLS>;
|
1156
|
+
/**
|
1157
|
+
Callback that is invoked when an error occurs during streaming.
|
1158
|
+
You can use it to log errors.
|
1159
|
+
The stream processing will pause until the callback promise is resolved.
|
1160
|
+
*/
|
1161
|
+
onError?: StreamTextOnErrorCallback;
|
1162
|
+
/**
|
1163
|
+
Callback that is called when the LLM response and all request tool executions
|
1164
|
+
(for tools that have an `execute` function) are finished.
|
1165
|
+
|
1166
|
+
The usage is the combined usage of all steps.
|
1167
|
+
*/
|
1168
|
+
onFinish?: StreamTextOnFinishCallback<TOOLS>;
|
1169
|
+
/**
|
1170
|
+
Callback that is called when each step (LLM call) is finished, including intermediate steps.
|
1171
|
+
*/
|
1172
|
+
onStepFinish?: StreamTextOnStepFinishCallback<TOOLS>;
|
1173
|
+
/**
|
1174
|
+
Internal. For test use only. May change without notice.
|
1175
|
+
*/
|
1176
|
+
_internal?: {
|
1177
|
+
now?: () => number;
|
1178
|
+
generateId?: IdGenerator;
|
1179
|
+
currentDate?: () => Date;
|
1180
|
+
};
|
1181
|
+
}): StreamTextResult<TOOLS, PARTIAL_OUTPUT>;
|
1182
|
+
|
951
1183
|
/**
|
952
1184
|
The data types that can be used in the UI message for the UI message data parts.
|
953
1185
|
*/
|
@@ -956,10 +1188,19 @@ type UITool = {
|
|
956
1188
|
input: unknown;
|
957
1189
|
output: unknown | undefined;
|
958
1190
|
};
|
1191
|
+
/**
|
1192
|
+
* Infer the input and output types of a tool so it can be used as a UI tool.
|
1193
|
+
*/
|
959
1194
|
type InferUITool<TOOL extends Tool> = {
|
960
1195
|
input: InferToolInput<TOOL>;
|
961
1196
|
output: InferToolOutput<TOOL>;
|
962
1197
|
};
|
1198
|
+
/**
|
1199
|
+
* Infer the input and output types of a tool set so it can be used as a UI tool set.
|
1200
|
+
*/
|
1201
|
+
type InferUITools<TOOLS extends ToolSet> = {
|
1202
|
+
[NAME in keyof TOOLS & string]: InferUITool<TOOLS[NAME]>;
|
1203
|
+
};
|
963
1204
|
type UITools = Record<string, UITool>;
|
964
1205
|
/**
|
965
1206
|
AI SDK UI Messages. They are used in the client and to communicate between the frontend and the API routes.
|
@@ -1085,18 +1326,24 @@ type ToolUIPart<TOOLS extends UITools = UITools> = ValueOf<{
|
|
1085
1326
|
state: 'input-streaming';
|
1086
1327
|
input: DeepPartial<TOOLS[NAME]['input']>;
|
1087
1328
|
providerExecuted?: boolean;
|
1329
|
+
output?: never;
|
1330
|
+
errorText?: never;
|
1088
1331
|
} | {
|
1089
1332
|
state: 'input-available';
|
1090
1333
|
input: TOOLS[NAME]['input'];
|
1091
1334
|
providerExecuted?: boolean;
|
1335
|
+
output?: never;
|
1336
|
+
errorText?: never;
|
1092
1337
|
} | {
|
1093
1338
|
state: 'output-available';
|
1094
1339
|
input: TOOLS[NAME]['input'];
|
1095
1340
|
output: TOOLS[NAME]['output'];
|
1341
|
+
errorText?: never;
|
1096
1342
|
providerExecuted?: boolean;
|
1097
1343
|
} | {
|
1098
1344
|
state: 'output-error';
|
1099
1345
|
input: TOOLS[NAME]['input'];
|
1346
|
+
output?: never;
|
1100
1347
|
errorText: string;
|
1101
1348
|
providerExecuted?: boolean;
|
1102
1349
|
});
|
@@ -1619,26 +1866,196 @@ declare class Agent<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = neve
|
|
1619
1866
|
}): StreamTextResult<TOOLS, OUTPUT_PARTIAL>;
|
1620
1867
|
}
|
1621
1868
|
|
1622
|
-
|
1623
|
-
|
1624
|
-
|
1625
|
-
|
1626
|
-
|
1627
|
-
|
1628
|
-
|
1629
|
-
|
1630
|
-
|
1631
|
-
|
1632
|
-
|
1633
|
-
|
1634
|
-
|
1635
|
-
|
1636
|
-
|
1637
|
-
|
1638
|
-
|
1639
|
-
|
1640
|
-
|
1641
|
-
|
1869
|
+
/**
|
1870
|
+
The result of an `embed` call.
|
1871
|
+
It contains the embedding, the value, and additional information.
|
1872
|
+
*/
|
1873
|
+
interface EmbedResult<VALUE> {
|
1874
|
+
/**
|
1875
|
+
The value that was embedded.
|
1876
|
+
*/
|
1877
|
+
readonly value: VALUE;
|
1878
|
+
/**
|
1879
|
+
The embedding of the value.
|
1880
|
+
*/
|
1881
|
+
readonly embedding: Embedding;
|
1882
|
+
/**
|
1883
|
+
The embedding token usage.
|
1884
|
+
*/
|
1885
|
+
readonly usage: EmbeddingModelUsage;
|
1886
|
+
/**
|
1887
|
+
Optional response data.
|
1888
|
+
*/
|
1889
|
+
readonly response?: {
|
1890
|
+
/**
|
1891
|
+
Response headers.
|
1892
|
+
*/
|
1893
|
+
headers?: Record<string, string>;
|
1894
|
+
/**
|
1895
|
+
The response body.
|
1896
|
+
*/
|
1897
|
+
body?: unknown;
|
1898
|
+
};
|
1899
|
+
}
|
1900
|
+
|
1901
|
+
/**
|
1902
|
+
Embed a value using an embedding model. The type of the value is defined by the embedding model.
|
1903
|
+
|
1904
|
+
@param model - The embedding model to use.
|
1905
|
+
@param value - The value that should be embedded.
|
1906
|
+
|
1907
|
+
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
|
1908
|
+
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
1909
|
+
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
1910
|
+
|
1911
|
+
@returns A result object that contains the embedding, the value, and additional information.
|
1912
|
+
*/
|
1913
|
+
declare function embed<VALUE>({ model, value, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
|
1914
|
+
/**
|
1915
|
+
The embedding model to use.
|
1916
|
+
*/
|
1917
|
+
model: EmbeddingModel<VALUE>;
|
1918
|
+
/**
|
1919
|
+
The value that should be embedded.
|
1920
|
+
*/
|
1921
|
+
value: VALUE;
|
1922
|
+
/**
|
1923
|
+
Maximum number of retries per embedding model call. Set to 0 to disable retries.
|
1924
|
+
|
1925
|
+
@default 2
|
1926
|
+
*/
|
1927
|
+
maxRetries?: number;
|
1928
|
+
/**
|
1929
|
+
Abort signal.
|
1930
|
+
*/
|
1931
|
+
abortSignal?: AbortSignal;
|
1932
|
+
/**
|
1933
|
+
Additional headers to include in the request.
|
1934
|
+
Only applicable for HTTP-based providers.
|
1935
|
+
*/
|
1936
|
+
headers?: Record<string, string>;
|
1937
|
+
/**
|
1938
|
+
Additional provider-specific options. They are passed through
|
1939
|
+
to the provider from the AI SDK and enable provider-specific
|
1940
|
+
functionality that can be fully encapsulated in the provider.
|
1941
|
+
*/
|
1942
|
+
providerOptions?: ProviderOptions;
|
1943
|
+
/**
|
1944
|
+
* Optional telemetry configuration (experimental).
|
1945
|
+
*/
|
1946
|
+
experimental_telemetry?: TelemetrySettings;
|
1947
|
+
}): Promise<EmbedResult<VALUE>>;
|
1948
|
+
|
1949
|
+
/**
|
1950
|
+
The result of a `embedMany` call.
|
1951
|
+
It contains the embeddings, the values, and additional information.
|
1952
|
+
*/
|
1953
|
+
interface EmbedManyResult<VALUE> {
|
1954
|
+
/**
|
1955
|
+
The values that were embedded.
|
1956
|
+
*/
|
1957
|
+
readonly values: Array<VALUE>;
|
1958
|
+
/**
|
1959
|
+
The embeddings. They are in the same order as the values.
|
1960
|
+
*/
|
1961
|
+
readonly embeddings: Array<Embedding>;
|
1962
|
+
/**
|
1963
|
+
The embedding token usage.
|
1964
|
+
*/
|
1965
|
+
readonly usage: EmbeddingModelUsage;
|
1966
|
+
/**
|
1967
|
+
Optional raw response data.
|
1968
|
+
*/
|
1969
|
+
readonly responses?: Array<{
|
1970
|
+
/**
|
1971
|
+
Response headers.
|
1972
|
+
*/
|
1973
|
+
headers?: Record<string, string>;
|
1974
|
+
/**
|
1975
|
+
The response body.
|
1976
|
+
*/
|
1977
|
+
body?: unknown;
|
1978
|
+
} | undefined>;
|
1979
|
+
}
|
1980
|
+
|
1981
|
+
/**
|
1982
|
+
Embed several values using an embedding model. The type of the value is defined
|
1983
|
+
by the embedding model.
|
1984
|
+
|
1985
|
+
`embedMany` automatically splits large requests into smaller chunks if the model
|
1986
|
+
has a limit on how many embeddings can be generated in a single call.
|
1987
|
+
|
1988
|
+
@param model - The embedding model to use.
|
1989
|
+
@param values - The values that should be embedded.
|
1990
|
+
|
1991
|
+
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
|
1992
|
+
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
1993
|
+
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
1994
|
+
|
1995
|
+
@returns A result object that contains the embeddings, the value, and additional information.
|
1996
|
+
*/
|
1997
|
+
declare function embedMany<VALUE>({ model, values, maxParallelCalls, maxRetries: maxRetriesArg, abortSignal, headers, providerOptions, experimental_telemetry: telemetry, }: {
|
1998
|
+
/**
|
1999
|
+
The embedding model to use.
|
2000
|
+
*/
|
2001
|
+
model: EmbeddingModel<VALUE>;
|
2002
|
+
/**
|
2003
|
+
The values that should be embedded.
|
2004
|
+
*/
|
2005
|
+
values: Array<VALUE>;
|
2006
|
+
/**
|
2007
|
+
Maximum number of retries per embedding model call. Set to 0 to disable retries.
|
2008
|
+
|
2009
|
+
@default 2
|
2010
|
+
*/
|
2011
|
+
maxRetries?: number;
|
2012
|
+
/**
|
2013
|
+
Abort signal.
|
2014
|
+
*/
|
2015
|
+
abortSignal?: AbortSignal;
|
2016
|
+
/**
|
2017
|
+
Additional headers to include in the request.
|
2018
|
+
Only applicable for HTTP-based providers.
|
2019
|
+
*/
|
2020
|
+
headers?: Record<string, string>;
|
2021
|
+
/**
|
2022
|
+
* Optional telemetry configuration (experimental).
|
2023
|
+
*/
|
2024
|
+
experimental_telemetry?: TelemetrySettings;
|
2025
|
+
/**
|
2026
|
+
Additional provider-specific options. They are passed through
|
2027
|
+
to the provider from the AI SDK and enable provider-specific
|
2028
|
+
functionality that can be fully encapsulated in the provider.
|
2029
|
+
*/
|
2030
|
+
providerOptions?: ProviderOptions;
|
2031
|
+
/**
|
2032
|
+
* Maximum number of concurrent requests.
|
2033
|
+
*
|
2034
|
+
* @default Infinity
|
2035
|
+
*/
|
2036
|
+
maxParallelCalls?: number;
|
2037
|
+
}): Promise<EmbedManyResult<VALUE>>;
|
2038
|
+
|
2039
|
+
declare const symbol$c: unique symbol;
|
2040
|
+
declare class InvalidArgumentError extends AISDKError {
|
2041
|
+
private readonly [symbol$c];
|
2042
|
+
readonly parameter: string;
|
2043
|
+
readonly value: unknown;
|
2044
|
+
constructor({ parameter, value, message, }: {
|
2045
|
+
parameter: string;
|
2046
|
+
value: unknown;
|
2047
|
+
message: string;
|
2048
|
+
});
|
2049
|
+
static isInstance(error: unknown): error is InvalidArgumentError;
|
2050
|
+
}
|
2051
|
+
|
2052
|
+
type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
|
2053
|
+
type: 'text-start';
|
2054
|
+
providerMetadata?: ProviderMetadata;
|
2055
|
+
id: string;
|
2056
|
+
} | {
|
2057
|
+
type: 'text-delta';
|
2058
|
+
id: string;
|
1642
2059
|
providerMetadata?: ProviderMetadata;
|
1643
2060
|
delta: string;
|
1644
2061
|
} | {
|
@@ -1883,480 +2300,299 @@ declare class RetryError extends AISDKError {
|
|
1883
2300
|
static isInstance(error: unknown): error is RetryError;
|
1884
2301
|
}
|
1885
2302
|
|
1886
|
-
|
1887
|
-
|
1888
|
-
|
1889
|
-
|
1890
|
-
|
1891
|
-
response: ServerResponse;
|
1892
|
-
textStream: ReadableStream<string>;
|
1893
|
-
} & ResponseInit): void;
|
1894
|
-
|
1895
|
-
declare const getOriginalFetch: () => typeof fetch;
|
1896
|
-
declare function callCompletionApi({ api, prompt, credentials, headers, body, streamProtocol, setCompletion, setLoading, setError, setAbortController, onFinish, onError, fetch, }: {
|
1897
|
-
api: string;
|
1898
|
-
prompt: string;
|
1899
|
-
credentials: RequestCredentials | undefined;
|
1900
|
-
headers: HeadersInit | undefined;
|
1901
|
-
body: Record<string, any>;
|
1902
|
-
streamProtocol: 'data' | 'text' | undefined;
|
1903
|
-
setCompletion: (completion: string) => void;
|
1904
|
-
setLoading: (loading: boolean) => void;
|
1905
|
-
setError: (error: Error | undefined) => void;
|
1906
|
-
setAbortController: (abortController: AbortController | null) => void;
|
1907
|
-
onFinish: ((prompt: string, completion: string) => void) | undefined;
|
1908
|
-
onError: ((error: Error) => void) | undefined;
|
1909
|
-
fetch: ReturnType<typeof getOriginalFetch> | undefined;
|
1910
|
-
}): Promise<string | null | undefined>;
|
1911
|
-
|
1912
|
-
interface UIMessageStreamWriter<UI_MESSAGE extends UIMessage = UIMessage> {
|
2303
|
+
/**
|
2304
|
+
The result of a `generateImage` call.
|
2305
|
+
It contains the images and additional information.
|
2306
|
+
*/
|
2307
|
+
interface GenerateImageResult {
|
1913
2308
|
/**
|
1914
|
-
|
2309
|
+
The first image that was generated.
|
1915
2310
|
*/
|
1916
|
-
|
2311
|
+
readonly image: GeneratedFile;
|
1917
2312
|
/**
|
1918
|
-
|
2313
|
+
The images that were generated.
|
2314
|
+
*/
|
2315
|
+
readonly images: Array<GeneratedFile>;
|
2316
|
+
/**
|
2317
|
+
Warnings for the call, e.g. unsupported settings.
|
2318
|
+
*/
|
2319
|
+
readonly warnings: Array<ImageGenerationWarning>;
|
2320
|
+
/**
|
2321
|
+
Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
|
1919
2322
|
*/
|
1920
|
-
|
2323
|
+
readonly responses: Array<ImageModelResponseMetadata>;
|
1921
2324
|
/**
|
1922
|
-
*
|
1923
|
-
*
|
1924
|
-
* to prevent duplicated error masking.
|
2325
|
+
* Provider-specific metadata. They are passed through from the provider to the AI SDK and enable provider-specific
|
2326
|
+
* results that can be fully encapsulated in the provider.
|
1925
2327
|
*/
|
1926
|
-
|
2328
|
+
readonly providerMetadata: ImageModelProviderMetadata;
|
1927
2329
|
}
|
1928
2330
|
|
1929
|
-
declare function createUIMessageStream<UI_MESSAGE extends UIMessage>({ execute, onError, originalMessages, onFinish, generateId, }: {
|
1930
|
-
execute: (options: {
|
1931
|
-
writer: UIMessageStreamWriter<UI_MESSAGE>;
|
1932
|
-
}) => Promise<void> | void;
|
1933
|
-
onError?: (error: unknown) => string;
|
1934
|
-
/**
|
1935
|
-
* The original messages. If they are provided, persistence mode is assumed,
|
1936
|
-
* and a message ID is provided for the response message.
|
1937
|
-
*/
|
1938
|
-
originalMessages?: UI_MESSAGE[];
|
1939
|
-
onFinish?: (options: {
|
1940
|
-
/**
|
1941
|
-
* The updates list of UI messages.
|
1942
|
-
*/
|
1943
|
-
messages: UI_MESSAGE[];
|
1944
|
-
/**
|
1945
|
-
* Indicates whether the response message is a continuation of the last original message,
|
1946
|
-
* or if a new message was created.
|
1947
|
-
*/
|
1948
|
-
isContinuation: boolean;
|
1949
|
-
/**
|
1950
|
-
* The message that was sent to the client as a response
|
1951
|
-
* (including the original message if it was extended).
|
1952
|
-
*/
|
1953
|
-
responseMessage: UI_MESSAGE;
|
1954
|
-
}) => void;
|
1955
|
-
generateId?: IdGenerator;
|
1956
|
-
}): ReadableStream<InferUIMessageChunk<UI_MESSAGE>>;
|
1957
|
-
|
1958
|
-
declare function createUIMessageStreamResponse({ status, statusText, headers, stream, consumeSseStream, }: UIMessageStreamResponseInit & {
|
1959
|
-
stream: ReadableStream<UIMessageChunk>;
|
1960
|
-
}): Response;
|
1961
|
-
|
1962
|
-
declare class JsonToSseTransformStream extends TransformStream<unknown, string> {
|
1963
|
-
constructor();
|
1964
|
-
}
|
1965
|
-
|
1966
|
-
declare function pipeUIMessageStreamToResponse({ response, status, statusText, headers, stream, consumeSseStream, }: {
|
1967
|
-
response: ServerResponse;
|
1968
|
-
stream: ReadableStream<UIMessageChunk>;
|
1969
|
-
} & UIMessageStreamResponseInit): void;
|
1970
|
-
|
1971
2331
|
/**
|
1972
|
-
|
1973
|
-
*
|
1974
|
-
* @param options.message - The last assistant message to use as a starting point when the conversation is resumed. Otherwise undefined.
|
1975
|
-
* @param options.stream - The stream of `UIMessageChunk`s to read.
|
1976
|
-
*
|
1977
|
-
* @returns An `AsyncIterableStream` of `UIMessage`s. Each stream part is a different state of the same message
|
1978
|
-
* as it is being completed.
|
1979
|
-
*/
|
1980
|
-
declare function readUIMessageStream<UI_MESSAGE extends UIMessage>({ message, stream, }: {
|
1981
|
-
message?: UI_MESSAGE;
|
1982
|
-
stream: ReadableStream<UIMessageChunk>;
|
1983
|
-
}): AsyncIterableStream<UI_MESSAGE>;
|
1984
|
-
|
1985
|
-
declare const UI_MESSAGE_STREAM_HEADERS: {
|
1986
|
-
'content-type': string;
|
1987
|
-
'cache-control': string;
|
1988
|
-
connection: string;
|
1989
|
-
'x-vercel-ai-ui-message-stream': string;
|
1990
|
-
'x-accel-buffering': string;
|
1991
|
-
};
|
2332
|
+
Generates images using an image model.
|
1992
2333
|
|
1993
|
-
|
1994
|
-
|
1995
|
-
|
1996
|
-
|
1997
|
-
|
1998
|
-
|
1999
|
-
|
2000
|
-
|
2001
|
-
|
2002
|
-
|
2003
|
-
|
2004
|
-
} & ChatRequestOptions) => Promise<ReadableStream<UIMessageChunk> | null>;
|
2005
|
-
}
|
2334
|
+
@param model - The image model to use.
|
2335
|
+
@param prompt - The prompt that should be used to generate the image.
|
2336
|
+
@param n - Number of images to generate. Default: 1.
|
2337
|
+
@param size - Size of the images to generate. Must have the format `{width}x{height}`.
|
2338
|
+
@param aspectRatio - Aspect ratio of the images to generate. Must have the format `{width}:{height}`.
|
2339
|
+
@param seed - Seed for the image generation.
|
2340
|
+
@param providerOptions - Additional provider-specific options that are passed through to the provider
|
2341
|
+
as body parameters.
|
2342
|
+
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
|
2343
|
+
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
2344
|
+
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
2006
2345
|
|
2007
|
-
|
2008
|
-
|
2009
|
-
|
2010
|
-
};
|
2011
|
-
type UIDataPartSchemas = Record<string, Validator<any> | StandardSchemaV1<any>>;
|
2012
|
-
type UIDataTypesToSchemas<T extends UIDataTypes> = {
|
2013
|
-
[K in keyof T]: Validator<T[K]> | StandardSchemaV1<T[K]>;
|
2014
|
-
};
|
2015
|
-
type InferUIDataParts<T extends UIDataPartSchemas> = {
|
2016
|
-
[K in keyof T]: T[K] extends Validator<infer U> ? U : T[K] extends StandardSchemaV1<infer U> ? U : unknown;
|
2017
|
-
};
|
2018
|
-
type ChatRequestOptions = {
|
2346
|
+
@returns A result object that contains the generated images.
|
2347
|
+
*/
|
2348
|
+
declare function generateImage({ model, prompt, n, maxImagesPerCall, size, aspectRatio, seed, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
|
2019
2349
|
/**
|
2020
|
-
|
2350
|
+
The image model to use.
|
2351
|
+
*/
|
2352
|
+
model: ImageModelV2;
|
2353
|
+
/**
|
2354
|
+
The prompt that should be used to generate the image.
|
2021
2355
|
*/
|
2022
|
-
|
2356
|
+
prompt: string;
|
2023
2357
|
/**
|
2024
|
-
|
2358
|
+
Number of images to generate.
|
2025
2359
|
*/
|
2026
|
-
|
2027
|
-
metadata?: unknown;
|
2028
|
-
};
|
2029
|
-
type ChatStatus = 'submitted' | 'streaming' | 'ready' | 'error';
|
2030
|
-
interface ChatState<UI_MESSAGE extends UIMessage> {
|
2031
|
-
status: ChatStatus;
|
2032
|
-
error: Error | undefined;
|
2033
|
-
messages: UI_MESSAGE[];
|
2034
|
-
pushMessage: (message: UI_MESSAGE) => void;
|
2035
|
-
popMessage: () => void;
|
2036
|
-
replaceMessage: (index: number, message: UI_MESSAGE) => void;
|
2037
|
-
snapshot: <T>(thing: T) => T;
|
2038
|
-
}
|
2039
|
-
type ChatOnErrorCallback = (error: Error) => void;
|
2040
|
-
type ChatOnToolCallCallback = ({ toolCall, }: {
|
2041
|
-
toolCall: ToolCall<string, unknown>;
|
2042
|
-
}) => void | Promise<unknown> | unknown;
|
2043
|
-
type ChatOnDataCallback<UI_MESSAGE extends UIMessage> = (dataPart: DataUIPart<InferUIMessageData<UI_MESSAGE>>) => void;
|
2044
|
-
type ChatOnFinishCallback<UI_MESSAGE extends UIMessage> = (options: {
|
2045
|
-
message: UI_MESSAGE;
|
2046
|
-
}) => void;
|
2047
|
-
interface ChatInit<UI_MESSAGE extends UIMessage> {
|
2360
|
+
n?: number;
|
2048
2361
|
/**
|
2049
|
-
|
2050
|
-
* generated.
|
2362
|
+
Number of images to generate.
|
2051
2363
|
*/
|
2052
|
-
|
2053
|
-
messageMetadataSchema?: Validator<InferUIMessageMetadata<UI_MESSAGE>> | StandardSchemaV1<InferUIMessageMetadata<UI_MESSAGE>>;
|
2054
|
-
dataPartSchemas?: UIDataTypesToSchemas<InferUIMessageData<UI_MESSAGE>>;
|
2055
|
-
messages?: UI_MESSAGE[];
|
2364
|
+
maxImagesPerCall?: number;
|
2056
2365
|
/**
|
2057
|
-
|
2058
|
-
* If not provided the default AI SDK `generateId` is used.
|
2366
|
+
Size of the images to generate. Must have the format `{width}x{height}`. If not provided, the default size will be used.
|
2059
2367
|
*/
|
2060
|
-
|
2061
|
-
transport?: ChatTransport<UI_MESSAGE>;
|
2062
|
-
maxSteps?: number;
|
2368
|
+
size?: `${number}x${number}`;
|
2063
2369
|
/**
|
2064
|
-
|
2370
|
+
Aspect ratio of the images to generate. Must have the format `{width}:{height}`. If not provided, the default aspect ratio will be used.
|
2065
2371
|
*/
|
2066
|
-
|
2372
|
+
aspectRatio?: `${number}:${number}`;
|
2067
2373
|
/**
|
2068
|
-
|
2069
|
-
|
2374
|
+
Seed for the image generation. If not provided, the default seed will be used.
|
2375
|
+
*/
|
2376
|
+
seed?: number;
|
2377
|
+
/**
|
2378
|
+
Additional provider-specific options that are passed through to the provider
|
2379
|
+
as body parameters.
|
2070
2380
|
|
2071
|
-
|
2072
|
-
|
2381
|
+
The outer record is keyed by the provider name, and the inner
|
2382
|
+
record is keyed by the provider-specific metadata key.
|
2383
|
+
```ts
|
2384
|
+
{
|
2385
|
+
"openai": {
|
2386
|
+
"style": "vivid"
|
2387
|
+
}
|
2388
|
+
}
|
2389
|
+
```
|
2073
2390
|
*/
|
2074
|
-
|
2391
|
+
providerOptions?: ProviderOptions;
|
2075
2392
|
/**
|
2076
|
-
|
2077
|
-
|
2078
|
-
|
2393
|
+
Maximum number of retries per embedding model call. Set to 0 to disable retries.
|
2394
|
+
|
2395
|
+
@default 2
|
2079
2396
|
*/
|
2080
|
-
|
2397
|
+
maxRetries?: number;
|
2081
2398
|
/**
|
2082
|
-
|
2083
|
-
|
2084
|
-
|
2399
|
+
Abort signal.
|
2400
|
+
*/
|
2401
|
+
abortSignal?: AbortSignal;
|
2402
|
+
/**
|
2403
|
+
Additional headers to include in the request.
|
2404
|
+
Only applicable for HTTP-based providers.
|
2405
|
+
*/
|
2406
|
+
headers?: Record<string, string>;
|
2407
|
+
}): Promise<GenerateImageResult>;
|
2408
|
+
|
2409
|
+
/**
|
2410
|
+
The result of a `generateObject` call.
|
2411
|
+
*/
|
2412
|
+
interface GenerateObjectResult<OBJECT> {
|
2413
|
+
/**
|
2414
|
+
The generated object (typed according to the schema).
|
2415
|
+
*/
|
2416
|
+
readonly object: OBJECT;
|
2417
|
+
/**
|
2418
|
+
The reason why the generation finished.
|
2419
|
+
*/
|
2420
|
+
readonly finishReason: FinishReason;
|
2421
|
+
/**
|
2422
|
+
The token usage of the generated text.
|
2423
|
+
*/
|
2424
|
+
readonly usage: LanguageModelUsage;
|
2425
|
+
/**
|
2426
|
+
Warnings from the model provider (e.g. unsupported settings).
|
2427
|
+
*/
|
2428
|
+
readonly warnings: CallWarning[] | undefined;
|
2429
|
+
/**
|
2430
|
+
Additional request information.
|
2085
2431
|
*/
|
2086
|
-
|
2087
|
-
}
|
2088
|
-
declare abstract class AbstractChat<UI_MESSAGE extends UIMessage> {
|
2089
|
-
readonly id: string;
|
2090
|
-
readonly generateId: IdGenerator;
|
2091
|
-
protected state: ChatState<UI_MESSAGE>;
|
2092
|
-
private messageMetadataSchema;
|
2093
|
-
private dataPartSchemas;
|
2094
|
-
private readonly transport;
|
2095
|
-
private maxSteps;
|
2096
|
-
private onError?;
|
2097
|
-
private onToolCall?;
|
2098
|
-
private onFinish?;
|
2099
|
-
private onData?;
|
2100
|
-
private activeResponse;
|
2101
|
-
private jobExecutor;
|
2102
|
-
constructor({ generateId, id, transport, maxSteps, messageMetadataSchema, dataPartSchemas, state, onError, onToolCall, onFinish, onData, }: Omit<ChatInit<UI_MESSAGE>, 'messages'> & {
|
2103
|
-
state: ChatState<UI_MESSAGE>;
|
2104
|
-
});
|
2432
|
+
readonly request: LanguageModelRequestMetadata;
|
2105
2433
|
/**
|
2106
|
-
|
2107
|
-
*
|
2108
|
-
* - `submitted`: The message has been sent to the API and we're awaiting the start of the response stream.
|
2109
|
-
* - `streaming`: The response is actively streaming in from the API, receiving chunks of data.
|
2110
|
-
* - `ready`: The full response has been received and processed; a new user message can be submitted.
|
2111
|
-
* - `error`: An error occurred during the API request, preventing successful completion.
|
2434
|
+
Additional response information.
|
2112
2435
|
*/
|
2113
|
-
|
2114
|
-
|
2115
|
-
|
2116
|
-
|
2117
|
-
|
2118
|
-
|
2119
|
-
get messages(): UI_MESSAGE[];
|
2120
|
-
get lastMessage(): UI_MESSAGE | undefined;
|
2121
|
-
set messages(messages: UI_MESSAGE[]);
|
2436
|
+
readonly response: LanguageModelResponseMetadata & {
|
2437
|
+
/**
|
2438
|
+
Response body (available only for providers that use HTTP requests).
|
2439
|
+
*/
|
2440
|
+
body?: unknown;
|
2441
|
+
};
|
2122
2442
|
/**
|
2123
|
-
|
2124
|
-
|
2125
|
-
|
2126
|
-
* If a messageId is provided, the message will be replaced.
|
2443
|
+
Additional provider-specific metadata. They are passed through
|
2444
|
+
from the provider to the AI SDK and enable provider-specific
|
2445
|
+
results that can be fully encapsulated in the provider.
|
2127
2446
|
*/
|
2128
|
-
|
2129
|
-
text?: never;
|
2130
|
-
files?: never;
|
2131
|
-
messageId?: string;
|
2132
|
-
}) | {
|
2133
|
-
text: string;
|
2134
|
-
files?: FileList | FileUIPart[];
|
2135
|
-
metadata?: InferUIMessageMetadata<UI_MESSAGE>;
|
2136
|
-
parts?: never;
|
2137
|
-
messageId?: string;
|
2138
|
-
} | {
|
2139
|
-
files: FileList | FileUIPart[];
|
2140
|
-
metadata?: InferUIMessageMetadata<UI_MESSAGE>;
|
2141
|
-
parts?: never;
|
2142
|
-
messageId?: string;
|
2143
|
-
}, options?: ChatRequestOptions) => Promise<void>;
|
2144
|
-
/**
|
2145
|
-
* Regenerate the assistant message with the provided message id.
|
2146
|
-
* If no message id is provided, the last assistant message will be regenerated.
|
2147
|
-
*/
|
2148
|
-
regenerate: ({ messageId, ...options }?: {
|
2149
|
-
messageId?: string;
|
2150
|
-
} & ChatRequestOptions) => Promise<void>;
|
2151
|
-
/**
|
2152
|
-
* Attempt to resume an ongoing streaming response.
|
2153
|
-
*/
|
2154
|
-
resumeStream: (options?: ChatRequestOptions) => Promise<void>;
|
2155
|
-
addToolResult: ({ toolCallId, output, }: {
|
2156
|
-
toolCallId: string;
|
2157
|
-
output: unknown;
|
2158
|
-
}) => Promise<void>;
|
2447
|
+
readonly providerMetadata: ProviderMetadata | undefined;
|
2159
2448
|
/**
|
2160
|
-
|
2161
|
-
|
2162
|
-
|
2163
|
-
|
2449
|
+
Converts the object to a JSON response.
|
2450
|
+
The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
|
2451
|
+
*/
|
2452
|
+
toJsonResponse(init?: ResponseInit): Response;
|
2164
2453
|
}
|
2165
2454
|
|
2166
|
-
declare function convertFileListToFileUIParts(files: FileList | undefined): Promise<Array<FileUIPart>>;
|
2167
|
-
|
2168
2455
|
/**
|
2169
|
-
|
2170
|
-
|
2456
|
+
A function that attempts to repair the raw output of the mode
|
2457
|
+
to enable JSON parsing.
|
2171
2458
|
|
2172
|
-
|
2173
|
-
|
2174
|
-
|
2175
|
-
|
2176
|
-
|
2177
|
-
|
2178
|
-
ignoreIncompleteToolCalls?: boolean;
|
2179
|
-
}): ModelMessage[];
|
2459
|
+
Should return the repaired text or null if the text cannot be repaired.
|
2460
|
+
*/
|
2461
|
+
type RepairTextFunction = (options: {
|
2462
|
+
text: string;
|
2463
|
+
error: JSONParseError | TypeValidationError;
|
2464
|
+
}) => Promise<string | null>;
|
2180
2465
|
/**
|
2181
|
-
|
2182
|
-
*/
|
2183
|
-
declare const convertToCoreMessages: typeof convertToModelMessages;
|
2466
|
+
Generate a structured, typed object for a given prompt and schema using a language model.
|
2184
2467
|
|
2185
|
-
|
2186
|
-
|
2187
|
-
|
2188
|
-
|
2189
|
-
|
2190
|
-
|
2191
|
-
|
2192
|
-
|
2193
|
-
|
2194
|
-
|
2195
|
-
|
2196
|
-
|
2197
|
-
|
2198
|
-
|
2199
|
-
|
2200
|
-
|
2201
|
-
|
2202
|
-
|
2203
|
-
|
2204
|
-
|
2205
|
-
|
2206
|
-
|
2207
|
-
|
2208
|
-
|
2209
|
-
|
2210
|
-
|
2211
|
-
|
2212
|
-
|
2213
|
-
|
2214
|
-
|
2215
|
-
|
2216
|
-
|
2217
|
-
|
2218
|
-
|
2219
|
-
|
2220
|
-
|
2221
|
-
|
2222
|
-
|
2223
|
-
|
2224
|
-
|
2468
|
+
This function does not stream the output. If you want to stream the output, use `streamObject` instead.
|
2469
|
+
|
2470
|
+
@param model - The language model to use.
|
2471
|
+
@param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
|
2472
|
+
|
2473
|
+
@param system - A system message that will be part of the prompt.
|
2474
|
+
@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
|
2475
|
+
@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
|
2476
|
+
|
2477
|
+
@param maxOutputTokens - Maximum number of tokens to generate.
|
2478
|
+
@param temperature - Temperature setting.
|
2479
|
+
The value is passed through to the provider. The range depends on the provider and model.
|
2480
|
+
It is recommended to set either `temperature` or `topP`, but not both.
|
2481
|
+
@param topP - Nucleus sampling.
|
2482
|
+
The value is passed through to the provider. The range depends on the provider and model.
|
2483
|
+
It is recommended to set either `temperature` or `topP`, but not both.
|
2484
|
+
@param topK - Only sample from the top K options for each subsequent token.
|
2485
|
+
Used to remove "long tail" low probability responses.
|
2486
|
+
Recommended for advanced use cases only. You usually only need to use temperature.
|
2487
|
+
@param presencePenalty - Presence penalty setting.
|
2488
|
+
It affects the likelihood of the model to repeat information that is already in the prompt.
|
2489
|
+
The value is passed through to the provider. The range depends on the provider and model.
|
2490
|
+
@param frequencyPenalty - Frequency penalty setting.
|
2491
|
+
It affects the likelihood of the model to repeatedly use the same words or phrases.
|
2492
|
+
The value is passed through to the provider. The range depends on the provider and model.
|
2493
|
+
@param stopSequences - Stop sequences.
|
2494
|
+
If set, the model will stop generating text when one of the stop sequences is generated.
|
2495
|
+
@param seed - The seed (integer) to use for random sampling.
|
2496
|
+
If set and supported by the model, calls will generate deterministic results.
|
2497
|
+
|
2498
|
+
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
|
2499
|
+
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
2500
|
+
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
2501
|
+
|
2502
|
+
@param schema - The schema of the object that the model should generate.
|
2503
|
+
@param schemaName - Optional name of the output that should be generated.
|
2504
|
+
Used by some providers for additional LLM guidance, e.g.
|
2505
|
+
via tool or schema name.
|
2506
|
+
@param schemaDescription - Optional description of the output that should be generated.
|
2507
|
+
Used by some providers for additional LLM guidance, e.g.
|
2508
|
+
via tool or schema description.
|
2509
|
+
|
2510
|
+
@param output - The type of the output.
|
2511
|
+
|
2512
|
+
- 'object': The output is an object.
|
2513
|
+
- 'array': The output is an array.
|
2514
|
+
- 'enum': The output is an enum.
|
2515
|
+
- 'no-schema': The output is not a schema.
|
2516
|
+
|
2517
|
+
@param experimental_repairText - A function that attempts to repair the raw output of the mode
|
2518
|
+
to enable JSON parsing.
|
2519
|
+
|
2520
|
+
@param experimental_telemetry - Optional telemetry configuration (experimental).
|
2521
|
+
|
2522
|
+
@param providerOptions - Additional provider-specific options. They are passed through
|
2523
|
+
to the provider from the AI SDK and enable provider-specific
|
2524
|
+
functionality that can be fully encapsulated in the provider.
|
2525
|
+
|
2526
|
+
@returns
|
2527
|
+
A result object that contains the generated object, the finish reason, the token usage, and additional information.
|
2528
|
+
*/
|
2529
|
+
declare function generateObject<SCHEMA extends z3.Schema | z4$1.ZodType | Schema = z4$1.ZodType<JSONValue$1>, OUTPUT extends 'object' | 'array' | 'enum' | 'no-schema' = InferSchema<SCHEMA> extends string ? 'enum' : 'object', RESULT = OUTPUT extends 'array' ? Array<InferSchema<SCHEMA>> : InferSchema<SCHEMA>>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (OUTPUT extends 'enum' ? {
|
2225
2530
|
/**
|
2226
|
-
|
2227
|
-
|
2228
|
-
|
2229
|
-
|
2230
|
-
|
2531
|
+
The enum values that the model should use.
|
2532
|
+
*/
|
2533
|
+
enum: Array<RESULT>;
|
2534
|
+
mode?: 'json';
|
2535
|
+
output: 'enum';
|
2536
|
+
} : OUTPUT extends 'no-schema' ? {} : {
|
2231
2537
|
/**
|
2232
|
-
|
2233
|
-
|
2234
|
-
|
2538
|
+
The schema of the object that the model should generate.
|
2539
|
+
*/
|
2540
|
+
schema: SCHEMA;
|
2235
2541
|
/**
|
2236
|
-
|
2237
|
-
|
2238
|
-
|
2239
|
-
|
2240
|
-
|
2241
|
-
* body: {
|
2242
|
-
* sessionId: '123',
|
2243
|
-
* }
|
2244
|
-
* })
|
2245
|
-
* ```
|
2246
|
-
*/
|
2247
|
-
body?: object;
|
2542
|
+
Optional name of the output that should be generated.
|
2543
|
+
Used by some providers for additional LLM guidance, e.g.
|
2544
|
+
via tool or schema name.
|
2545
|
+
*/
|
2546
|
+
schemaName?: string;
|
2248
2547
|
/**
|
2249
|
-
|
2250
|
-
|
2251
|
-
|
2252
|
-
|
2548
|
+
Optional description of the output that should be generated.
|
2549
|
+
Used by some providers for additional LLM guidance, e.g.
|
2550
|
+
via tool or schema description.
|
2551
|
+
*/
|
2552
|
+
schemaDescription?: string;
|
2253
2553
|
/**
|
2254
|
-
|
2255
|
-
* to prepare the request body for the chat API. This can be useful for
|
2256
|
-
* customizing the request body based on the messages and data in the chat.
|
2257
|
-
*
|
2258
|
-
* @param id The id of the chat.
|
2259
|
-
* @param messages The current messages in the chat.
|
2260
|
-
* @param requestBody The request body object passed in the chat request.
|
2261
|
-
*/
|
2262
|
-
prepareSendMessagesRequest?: PrepareSendMessagesRequest<UI_MESSAGE>;
|
2263
|
-
prepareReconnectToStreamRequest?: PrepareReconnectToStreamRequest;
|
2264
|
-
};
|
2265
|
-
declare abstract class HttpChatTransport<UI_MESSAGE extends UIMessage> implements ChatTransport<UI_MESSAGE> {
|
2266
|
-
protected api: string;
|
2267
|
-
protected credentials?: RequestCredentials;
|
2268
|
-
protected headers?: Record<string, string> | Headers;
|
2269
|
-
protected body?: object;
|
2270
|
-
protected fetch?: FetchFunction;
|
2271
|
-
protected prepareSendMessagesRequest?: PrepareSendMessagesRequest<UI_MESSAGE>;
|
2272
|
-
protected prepareReconnectToStreamRequest?: PrepareReconnectToStreamRequest;
|
2273
|
-
constructor({ api, credentials, headers, body, fetch, prepareSendMessagesRequest, prepareReconnectToStreamRequest, }: HttpChatTransportInitOptions<UI_MESSAGE>);
|
2274
|
-
sendMessages({ abortSignal, ...options }: Parameters<ChatTransport<UI_MESSAGE>['sendMessages']>[0]): Promise<ReadableStream<UIMessageChunk>>;
|
2275
|
-
reconnectToStream(options: Parameters<ChatTransport<UI_MESSAGE>['reconnectToStream']>[0]): Promise<ReadableStream<UIMessageChunk> | null>;
|
2276
|
-
protected abstract processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
|
2277
|
-
}
|
2554
|
+
The mode to use for object generation.
|
2278
2555
|
|
2279
|
-
|
2280
|
-
constructor(options?: HttpChatTransportInitOptions<UI_MESSAGE>);
|
2281
|
-
protected processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
|
2282
|
-
}
|
2556
|
+
The schema is converted into a JSON schema and used in one of the following ways
|
2283
2557
|
|
2284
|
-
|
2285
|
-
|
2286
|
-
|
2287
|
-
}
|
2558
|
+
- 'auto': The provider will choose the best mode for the model.
|
2559
|
+
- 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
|
2560
|
+
- 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
|
2288
2561
|
|
2289
|
-
|
2562
|
+
Please note that most providers do not support all modes.
|
2563
|
+
|
2564
|
+
Default and recommended: 'auto' (best mode for the model).
|
2565
|
+
*/
|
2566
|
+
mode?: 'auto' | 'json' | 'tool';
|
2567
|
+
}) & {
|
2568
|
+
output?: OUTPUT;
|
2290
2569
|
/**
|
2291
|
-
|
2570
|
+
The language model to use.
|
2292
2571
|
*/
|
2293
|
-
|
2572
|
+
model: LanguageModel;
|
2294
2573
|
/**
|
2295
|
-
|
2296
|
-
|
2297
|
-
body?: object;
|
2298
|
-
};
|
2299
|
-
type UseCompletionOptions = {
|
2300
|
-
/**
|
2301
|
-
* The API endpoint that accepts a `{ prompt: string }` object and returns
|
2302
|
-
* a stream of tokens of the AI completion response. Defaults to `/api/completion`.
|
2303
|
-
*/
|
2304
|
-
api?: string;
|
2305
|
-
/**
|
2306
|
-
* An unique identifier for the chat. If not provided, a random one will be
|
2307
|
-
* generated. When provided, the `useChat` hook with the same `id` will
|
2308
|
-
* have shared states across components.
|
2309
|
-
*/
|
2310
|
-
id?: string;
|
2311
|
-
/**
|
2312
|
-
* Initial prompt input of the completion.
|
2313
|
-
*/
|
2314
|
-
initialInput?: string;
|
2315
|
-
/**
|
2316
|
-
* Initial completion result. Useful to load an existing history.
|
2317
|
-
*/
|
2318
|
-
initialCompletion?: string;
|
2319
|
-
/**
|
2320
|
-
* Callback function to be called when the completion is finished streaming.
|
2321
|
-
*/
|
2322
|
-
onFinish?: (prompt: string, completion: string) => void;
|
2323
|
-
/**
|
2324
|
-
* Callback function to be called when an error is encountered.
|
2574
|
+
A function that attempts to repair the raw output of the mode
|
2575
|
+
to enable JSON parsing.
|
2325
2576
|
*/
|
2326
|
-
|
2577
|
+
experimental_repairText?: RepairTextFunction;
|
2327
2578
|
/**
|
2328
|
-
|
2329
|
-
|
2330
|
-
|
2331
|
-
*/
|
2332
|
-
credentials?: RequestCredentials;
|
2579
|
+
Optional telemetry configuration (experimental).
|
2580
|
+
*/
|
2581
|
+
experimental_telemetry?: TelemetrySettings;
|
2333
2582
|
/**
|
2334
|
-
|
2335
|
-
|
2336
|
-
|
2583
|
+
Additional provider-specific options. They are passed through
|
2584
|
+
to the provider from the AI SDK and enable provider-specific
|
2585
|
+
functionality that can be fully encapsulated in the provider.
|
2586
|
+
*/
|
2587
|
+
providerOptions?: ProviderOptions;
|
2337
2588
|
/**
|
2338
|
-
*
|
2339
|
-
* @example
|
2340
|
-
* Send a `sessionId` to the API along with the prompt.
|
2341
|
-
* ```js
|
2342
|
-
* useChat({
|
2343
|
-
* body: {
|
2344
|
-
* sessionId: '123',
|
2345
|
-
* }
|
2346
|
-
* })
|
2347
|
-
* ```
|
2589
|
+
* Internal. For test use only. May change without notice.
|
2348
2590
|
*/
|
2349
|
-
|
2350
|
-
|
2351
|
-
|
2352
|
-
|
2353
|
-
|
2354
|
-
/**
|
2355
|
-
Custom fetch implementation. You can use it as a middleware to intercept requests,
|
2356
|
-
or to provide a custom fetch implementation for e.g. testing.
|
2357
|
-
*/
|
2358
|
-
fetch?: FetchFunction;
|
2359
|
-
};
|
2591
|
+
_internal?: {
|
2592
|
+
generateId?: () => string;
|
2593
|
+
currentDate?: () => Date;
|
2594
|
+
};
|
2595
|
+
}): Promise<GenerateObjectResult<RESULT>>;
|
2360
2596
|
|
2361
2597
|
/**
|
2362
2598
|
* Calculates the cosine similarity between two vectors. This is a useful metric for
|
@@ -2419,257 +2655,137 @@ declare function simulateReadableStream<T>({ chunks, initialDelayInMs, chunkDela
|
|
2419
2655
|
}): ReadableStream<T>;
|
2420
2656
|
|
2421
2657
|
/**
|
2422
|
-
The result of
|
2423
|
-
It contains the embedding, the value, and additional information.
|
2658
|
+
The result of a `streamObject` call that contains the partial object stream and additional information.
|
2424
2659
|
*/
|
2425
|
-
interface
|
2426
|
-
/**
|
2427
|
-
The value that was embedded.
|
2428
|
-
*/
|
2429
|
-
readonly value: VALUE;
|
2430
|
-
/**
|
2431
|
-
The embedding of the value.
|
2432
|
-
*/
|
2433
|
-
readonly embedding: Embedding;
|
2434
|
-
/**
|
2435
|
-
The embedding token usage.
|
2436
|
-
*/
|
2437
|
-
readonly usage: EmbeddingModelUsage;
|
2660
|
+
interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
|
2438
2661
|
/**
|
2439
|
-
|
2662
|
+
Warnings from the model provider (e.g. unsupported settings)
|
2440
2663
|
*/
|
2441
|
-
readonly
|
2442
|
-
/**
|
2443
|
-
Response headers.
|
2444
|
-
*/
|
2445
|
-
headers?: Record<string, string>;
|
2446
|
-
/**
|
2447
|
-
The response body.
|
2448
|
-
*/
|
2449
|
-
body?: unknown;
|
2450
|
-
};
|
2451
|
-
}
|
2452
|
-
|
2453
|
-
/**
|
2454
|
-
Embed a value using an embedding model. The type of the value is defined by the embedding model.
|
2455
|
-
|
2456
|
-
@param model - The embedding model to use.
|
2457
|
-
@param value - The value that should be embedded.
|
2458
|
-
|
2459
|
-
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
|
2460
|
-
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
2461
|
-
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
2462
|
-
|
2463
|
-
@returns A result object that contains the embedding, the value, and additional information.
|
2464
|
-
*/
|
2465
|
-
declare function embed<VALUE>({ model, value, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
|
2664
|
+
readonly warnings: Promise<CallWarning[] | undefined>;
|
2466
2665
|
/**
|
2467
|
-
|
2666
|
+
The token usage of the generated response. Resolved when the response is finished.
|
2468
2667
|
*/
|
2469
|
-
|
2470
|
-
/**
|
2471
|
-
The value that should be embedded.
|
2472
|
-
*/
|
2473
|
-
value: VALUE;
|
2668
|
+
readonly usage: Promise<LanguageModelUsage>;
|
2474
2669
|
/**
|
2475
|
-
|
2476
|
-
|
2477
|
-
|
2670
|
+
Additional provider-specific metadata. They are passed through
|
2671
|
+
from the provider to the AI SDK and enable provider-specific
|
2672
|
+
results that can be fully encapsulated in the provider.
|
2478
2673
|
*/
|
2479
|
-
|
2674
|
+
readonly providerMetadata: Promise<ProviderMetadata | undefined>;
|
2480
2675
|
/**
|
2481
|
-
|
2676
|
+
Additional request information from the last step.
|
2482
2677
|
*/
|
2483
|
-
|
2678
|
+
readonly request: Promise<LanguageModelRequestMetadata>;
|
2484
2679
|
/**
|
2485
|
-
Additional
|
2486
|
-
Only applicable for HTTP-based providers.
|
2680
|
+
Additional response information.
|
2487
2681
|
*/
|
2488
|
-
|
2682
|
+
readonly response: Promise<LanguageModelResponseMetadata>;
|
2489
2683
|
/**
|
2490
|
-
|
2491
|
-
|
2492
|
-
|
2493
|
-
*/
|
2494
|
-
providerOptions?: ProviderOptions;
|
2684
|
+
The generated object (typed according to the schema). Resolved when the response is finished.
|
2685
|
+
*/
|
2686
|
+
readonly object: Promise<RESULT>;
|
2495
2687
|
/**
|
2496
|
-
|
2688
|
+
Stream of partial objects. It gets more complete as the stream progresses.
|
2689
|
+
|
2690
|
+
Note that the partial object is not validated.
|
2691
|
+
If you want to be certain that the actual content matches your schema, you need to implement your own validation for partial results.
|
2692
|
+
*/
|
2693
|
+
readonly partialObjectStream: AsyncIterableStream<PARTIAL>;
|
2694
|
+
/**
|
2695
|
+
* Stream over complete array elements. Only available if the output strategy is set to `array`.
|
2497
2696
|
*/
|
2498
|
-
|
2499
|
-
}): Promise<EmbedResult<VALUE>>;
|
2500
|
-
|
2501
|
-
/**
|
2502
|
-
The result of a `embedMany` call.
|
2503
|
-
It contains the embeddings, the values, and additional information.
|
2504
|
-
*/
|
2505
|
-
interface EmbedManyResult<VALUE> {
|
2697
|
+
readonly elementStream: ELEMENT_STREAM;
|
2506
2698
|
/**
|
2507
|
-
|
2699
|
+
Text stream of the JSON representation of the generated object. It contains text chunks.
|
2700
|
+
When the stream is finished, the object is valid JSON that can be parsed.
|
2508
2701
|
*/
|
2509
|
-
readonly
|
2510
|
-
/**
|
2511
|
-
The embeddings. They are in the same order as the values.
|
2512
|
-
*/
|
2513
|
-
readonly embeddings: Array<Embedding>;
|
2702
|
+
readonly textStream: AsyncIterableStream<string>;
|
2514
2703
|
/**
|
2515
|
-
|
2516
|
-
|
2517
|
-
|
2704
|
+
Stream of different types of events, including partial objects, errors, and finish events.
|
2705
|
+
Only errors that stop the stream, such as network errors, are thrown.
|
2706
|
+
*/
|
2707
|
+
readonly fullStream: AsyncIterableStream<ObjectStreamPart<PARTIAL>>;
|
2518
2708
|
/**
|
2519
|
-
|
2709
|
+
Writes text delta output to a Node.js response-like object.
|
2710
|
+
It sets a `Content-Type` header to `text/plain; charset=utf-8` and
|
2711
|
+
writes each text delta as a separate chunk.
|
2712
|
+
|
2713
|
+
@param response A Node.js response-like object (ServerResponse).
|
2714
|
+
@param init Optional headers, status code, and status text.
|
2520
2715
|
*/
|
2521
|
-
|
2522
|
-
|
2523
|
-
|
2716
|
+
pipeTextStreamToResponse(response: ServerResponse$1, init?: ResponseInit): void;
|
2717
|
+
/**
|
2718
|
+
Creates a simple text stream response.
|
2719
|
+
The response has a `Content-Type` header set to `text/plain; charset=utf-8`.
|
2720
|
+
Each text delta is encoded as UTF-8 and sent as a separate chunk.
|
2721
|
+
Non-text-delta events are ignored.
|
2722
|
+
|
2723
|
+
@param init Optional headers, status code, and status text.
|
2524
2724
|
*/
|
2525
|
-
|
2526
|
-
/**
|
2527
|
-
The response body.
|
2528
|
-
*/
|
2529
|
-
body?: unknown;
|
2530
|
-
} | undefined>;
|
2725
|
+
toTextStreamResponse(init?: ResponseInit): Response;
|
2531
2726
|
}
|
2727
|
+
type ObjectStreamPart<PARTIAL> = {
|
2728
|
+
type: 'object';
|
2729
|
+
object: PARTIAL;
|
2730
|
+
} | {
|
2731
|
+
type: 'text-delta';
|
2732
|
+
textDelta: string;
|
2733
|
+
} | {
|
2734
|
+
type: 'error';
|
2735
|
+
error: unknown;
|
2736
|
+
} | {
|
2737
|
+
type: 'finish';
|
2738
|
+
finishReason: FinishReason;
|
2739
|
+
usage: LanguageModelUsage;
|
2740
|
+
response: LanguageModelResponseMetadata;
|
2741
|
+
providerMetadata?: ProviderMetadata;
|
2742
|
+
};
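A sketch of consuming `StreamObjectResult.fullStream`, whose events are typed by the `ObjectStreamPart` union above. The `@ai-sdk/openai` import and the `gpt-4o` model id are assumptions for illustration:

```ts
import { streamObject } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package
import { z } from 'zod';

const result = streamObject({
  model: openai('gpt-4o'), // assumed model id
  schema: z.object({ title: z.string(), tags: z.array(z.string()) }),
  prompt: 'Suggest a title and tags for a blog post about TypeScript.',
});

// fullStream emits partial objects, text deltas, errors, and a finish event.
// Only errors that stop the stream (e.g. network errors) are thrown.
for await (const part of result.fullStream) {
  switch (part.type) {
    case 'object':
      console.log('partial object:', part.object);
      break;
    case 'text-delta':
      process.stdout.write(part.textDelta);
      break;
    case 'error':
      console.error('stream error:', part.error);
      break;
    case 'finish':
      console.log('finish reason:', part.finishReason, 'usage:', part.usage);
      break;
  }
}
```

For plain-text consumers, the same result also exposes `textStream`, `toTextStreamResponse()`, and `pipeTextStreamToResponse()` as declared above.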
|
2532
2743
|
|
2533
2744
|
/**
|
2534
|
-
|
2535
|
-
by the embedding model.
|
2536
|
-
|
2537
|
-
`embedMany` automatically splits large requests into smaller chunks if the model
|
2538
|
-
has a limit on how many embeddings can be generated in a single call.
|
2539
|
-
|
2540
|
-
@param model - The embedding model to use.
|
2541
|
-
@param values - The values that should be embedded.
|
2745
|
+
Callback that is set using the `onError` option.
|
2542
2746
|
|
2543
|
-
@param
|
2544
|
-
|
2545
|
-
|
2747
|
+
@param event - The event that is passed to the callback.
|
2748
|
+
*/
|
2749
|
+
type StreamObjectOnErrorCallback = (event: {
|
2750
|
+
error: unknown;
|
2751
|
+
}) => Promise<void> | void;
|
2752
|
+
/**
|
2753
|
+
Callback that is set using the `onFinish` option.
|
2546
2754
|
|
2547
|
-
@
|
2755
|
+
@param event - The event that is passed to the callback.
|
2548
2756
|
*/
|
2549
|
-
|
2550
|
-
/**
|
2551
|
-
The embedding model to use.
|
2552
|
-
*/
|
2553
|
-
model: EmbeddingModel<VALUE>;
|
2757
|
+
type StreamObjectOnFinishCallback<RESULT> = (event: {
|
2554
2758
|
/**
|
2555
|
-
The
|
2556
|
-
|
2557
|
-
|
2759
|
+
The token usage of the generated response.
|
2760
|
+
*/
|
2761
|
+
usage: LanguageModelUsage;
|
2558
2762
|
/**
|
2559
|
-
|
2560
|
-
|
2561
|
-
|
2562
|
-
*/
|
2563
|
-
maxRetries?: number;
|
2763
|
+
The generated object. Can be undefined if the final object does not match the schema.
|
2764
|
+
*/
|
2765
|
+
object: RESULT | undefined;
|
2564
2766
|
/**
|
2565
|
-
|
2566
|
-
|
2567
|
-
|
2767
|
+
Optional error object. This is e.g. a TypeValidationError when the final object does not match the schema.
|
2768
|
+
*/
|
2769
|
+
error: unknown | undefined;
|
2568
2770
|
/**
|
2569
|
-
|
2570
|
-
Only applicable for HTTP-based providers.
|
2771
|
+
Response metadata.
|
2571
2772
|
*/
|
2572
|
-
|
2573
|
-
/**
|
2574
|
-
* Optional telemetry configuration (experimental).
|
2575
|
-
*/
|
2576
|
-
experimental_telemetry?: TelemetrySettings;
|
2577
|
-
/**
|
2578
|
-
Additional provider-specific options. They are passed through
|
2579
|
-
to the provider from the AI SDK and enable provider-specific
|
2580
|
-
functionality that can be fully encapsulated in the provider.
|
2581
|
-
*/
|
2582
|
-
providerOptions?: ProviderOptions;
|
2583
|
-
/**
|
2584
|
-
* Maximum number of concurrent requests.
|
2585
|
-
*
|
2586
|
-
* @default Infinity
|
2587
|
-
*/
|
2588
|
-
maxParallelCalls?: number;
|
2589
|
-
}): Promise<EmbedManyResult<VALUE>>;
|
2590
|
-
|
2591
|
-
/**
|
2592
|
-
* Detects the first chunk in a buffer.
|
2593
|
-
*
|
2594
|
-
* @param buffer - The buffer to detect the first chunk in.
|
2595
|
-
*
|
2596
|
-
* @returns The first detected chunk, or `undefined` if no chunk was detected.
|
2597
|
-
*/
|
2598
|
-
type ChunkDetector = (buffer: string) => string | undefined | null;
|
2599
|
-
/**
|
2600
|
-
* Smooths text streaming output.
|
2601
|
-
*
|
2602
|
-
* @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms. Can be set to `null` to skip the delay.
|
2603
|
-
* @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default), "line" to stream line by line, or provide a custom RegExp pattern for custom chunking.
|
2604
|
-
*
|
2605
|
-
* @returns A transform stream that smooths text streaming output.
|
2606
|
-
*/
|
2607
|
-
declare function smoothStream<TOOLS extends ToolSet>({ delayInMs, chunking, _internal: { delay }, }?: {
|
2608
|
-
delayInMs?: number | null;
|
2609
|
-
chunking?: 'word' | 'line' | RegExp | ChunkDetector;
|
2610
|
-
/**
|
2611
|
-
* Internal. For test use only. May change without notice.
|
2612
|
-
*/
|
2613
|
-
_internal?: {
|
2614
|
-
delay?: (delayInMs: number | null) => Promise<void>;
|
2615
|
-
};
|
2616
|
-
}): (options: {
|
2617
|
-
tools: TOOLS;
|
2618
|
-
}) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
|
2619
|
-
|
2620
|
-
/**
|
2621
|
-
A transformation that is applied to the stream.
|
2622
|
-
|
2623
|
-
@param stopStream - A function that stops the source stream.
|
2624
|
-
@param tools - The tools that are accessible to and can be called by the model. The model needs to support calling tools.
|
2625
|
-
*/
|
2626
|
-
type StreamTextTransform<TOOLS extends ToolSet> = (options: {
|
2627
|
-
tools: TOOLS;
|
2628
|
-
stopStream: () => void;
|
2629
|
-
}) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
|
2630
|
-
/**
|
2631
|
-
Callback that is set using the `onError` option.
|
2632
|
-
|
2633
|
-
@param event - The event that is passed to the callback.
|
2634
|
-
*/
|
2635
|
-
type StreamTextOnErrorCallback = (event: {
|
2636
|
-
error: unknown;
|
2637
|
-
}) => Promise<void> | void;
|
2638
|
-
/**
|
2639
|
-
Callback that is set using the `onStepFinish` option.
|
2640
|
-
|
2641
|
-
@param stepResult - The result of the step.
|
2642
|
-
*/
|
2643
|
-
type StreamTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => Promise<void> | void;
|
2644
|
-
/**
|
2645
|
-
Callback that is set using the `onChunk` option.
|
2646
|
-
|
2647
|
-
@param event - The event that is passed to the callback.
|
2648
|
-
*/
|
2649
|
-
type StreamTextOnChunkCallback<TOOLS extends ToolSet> = (event: {
|
2650
|
-
chunk: Extract<TextStreamPart<TOOLS>, {
|
2651
|
-
type: 'text' | 'reasoning' | 'source' | 'tool-call' | 'tool-input-start' | 'tool-input-delta' | 'tool-result' | 'raw';
|
2652
|
-
}>;
|
2653
|
-
}) => Promise<void> | void;
|
2654
|
-
/**
|
2655
|
-
Callback that is set using the `onFinish` option.
|
2656
|
-
|
2657
|
-
@param event - The event that is passed to the callback.
|
2658
|
-
*/
|
2659
|
-
type StreamTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
|
2773
|
+
response: LanguageModelResponseMetadata;
|
2660
2774
|
/**
|
2661
|
-
|
2662
|
-
|
2663
|
-
|
2775
|
+
Warnings from the model provider (e.g. unsupported settings).
|
2776
|
+
*/
|
2777
|
+
warnings?: CallWarning[];
|
2664
2778
|
/**
|
2665
|
-
|
2666
|
-
|
2667
|
-
|
2779
|
+
Additional provider-specific metadata. They are passed through
|
2780
|
+
to the provider from the AI SDK and enable provider-specific
|
2781
|
+
functionality that can be fully encapsulated in the provider.
|
2782
|
+
*/
|
2783
|
+
providerMetadata: ProviderMetadata | undefined;
|
2668
2784
|
}) => Promise<void> | void;
|
2669
2785
|
/**
|
2670
|
-
Generate a
|
2786
|
+
Generate a structured, typed object for a given prompt and schema using a language model.
|
2671
2787
|
|
2672
|
-
This function streams the output. If you do not want to stream the output, use `
|
2788
|
+
This function streams the output. If you do not want to stream the output, use `generateObject` instead.
|
2673
2789
|
|
2674
2790
|
@param model - The language model to use.
|
2675
2791
|
@param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
|
@@ -2703,37 +2819,74 @@ If set and supported by the model, calls will generate deterministic results.
|
|
2703
2819
|
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
2704
2820
|
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
2705
2821
|
|
2706
|
-
@param
|
2822
|
+
@param schema - The schema of the object that the model should generate.
|
2823
|
+
@param schemaName - Optional name of the output that should be generated.
|
2824
|
+
Used by some providers for additional LLM guidance, e.g.
|
2825
|
+
via tool or schema name.
|
2826
|
+
@param schemaDescription - Optional description of the output that should be generated.
|
2827
|
+
Used by some providers for additional LLM guidance, e.g.
|
2828
|
+
via tool or schema description.
|
2707
2829
|
|
2708
|
-
@param
|
2709
|
-
@param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
|
2710
|
-
@param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
|
2711
|
-
@param onFinish - Callback that is called when the LLM response and all request tool executions
|
2712
|
-
(for tools that have an `execute` function) are finished.
|
2830
|
+
@param output - The type of the output.
|
2713
2831
|
|
2714
|
-
|
2715
|
-
|
2832
|
+
- 'object': The output is an object.
|
2833
|
+
- 'array': The output is an array.
|
2834
|
+
- 'enum': The output is an enum.
|
2835
|
+
- 'no-schema': The output is not a schema.
|
2836
|
+
|
2837
|
+
@param experimental_telemetry - Optional telemetry configuration (experimental).
|
2838
|
+
|
2839
|
+
@param providerOptions - Additional provider-specific options. They are passed through
|
2840
|
+
to the provider from the AI SDK and enable provider-specific
|
2841
|
+
functionality that can be fully encapsulated in the provider.
|
2842
|
+
|
2843
|
+
@returns
|
2844
|
+
A result object for accessing the partial object stream and additional information.
|
2716
2845
|
*/
|
2717
|
-
declare function
|
2846
|
+
declare function streamObject<SCHEMA extends z3.Schema | z4$1.ZodType | Schema = z4$1.ZodType<JSONValue$1>, OUTPUT extends 'object' | 'array' | 'enum' | 'no-schema' = InferSchema<SCHEMA> extends string ? 'enum' : 'object', RESULT = OUTPUT extends 'array' ? Array<InferSchema<SCHEMA>> : InferSchema<SCHEMA>>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (OUTPUT extends 'enum' ? {
|
2718
2847
|
/**
|
2719
|
-
The
|
2720
|
-
|
2721
|
-
|
2848
|
+
The enum values that the model should use.
|
2849
|
+
*/
|
2850
|
+
enum: Array<RESULT>;
|
2851
|
+
mode?: 'json';
|
2852
|
+
output: 'enum';
|
2853
|
+
} : OUTPUT extends 'no-schema' ? {} : {
|
2722
2854
|
/**
|
2723
|
-
The
|
2724
|
-
|
2725
|
-
|
2855
|
+
The schema of the object that the model should generate.
|
2856
|
+
*/
|
2857
|
+
schema: SCHEMA;
|
2726
2858
|
/**
|
2727
|
-
|
2728
|
-
|
2729
|
-
|
2859
|
+
Optional name of the output that should be generated.
|
2860
|
+
Used by some providers for additional LLM guidance, e.g.
|
2861
|
+
via tool or schema name.
|
2862
|
+
*/
|
2863
|
+
schemaName?: string;
|
2730
2864
|
/**
|
2731
|
-
|
2732
|
-
|
2865
|
+
Optional description of the output that should be generated.
|
2866
|
+
Used by some providers for additional LLM guidance, e.g.
|
2867
|
+
via tool or schema description.
|
2868
|
+
*/
|
2869
|
+
schemaDescription?: string;
|
2870
|
+
/**
|
2871
|
+
The mode to use for object generation.
|
2733
2872
|
|
2734
|
-
|
2735
|
-
|
2736
|
-
|
2873
|
+
The schema is converted into a JSON schema and used in one of the following ways
|
2874
|
+
|
2875
|
+
- 'auto': The provider will choose the best mode for the model.
|
2876
|
+
- 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
|
2877
|
+
- 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
|
2878
|
+
|
2879
|
+
Please note that most providers do not support all modes.
|
2880
|
+
|
2881
|
+
Default and recommended: 'auto' (best mode for the model).
|
2882
|
+
*/
|
2883
|
+
mode?: 'auto' | 'json' | 'tool';
|
2884
|
+
}) & {
|
2885
|
+
output?: OUTPUT;
|
2886
|
+
/**
|
2887
|
+
The language model to use.
|
2888
|
+
*/
|
2889
|
+
model: LanguageModel;
|
2737
2890
|
/**
|
2738
2891
|
Optional telemetry configuration (experimental).
|
2739
2892
|
*/
|
@@ -2742,156 +2895,108 @@ Optional telemetry configuration (experimental).
|
|
2742
2895
|
Additional provider-specific options. They are passed through
|
2743
2896
|
to the provider from the AI SDK and enable provider-specific
|
2744
2897
|
functionality that can be fully encapsulated in the provider.
|
2745
|
-
|
2898
|
+
*/
|
2746
2899
|
providerOptions?: ProviderOptions;
|
2747
2900
|
/**
|
2748
|
-
|
2749
|
-
|
2750
|
-
|
2901
|
+
Callback that is invoked when an error occurs during streaming.
|
2902
|
+
You can use it to log errors.
|
2903
|
+
The stream processing will pause until the callback promise is resolved.
|
2904
|
+
*/
|
2905
|
+
onError?: StreamObjectOnErrorCallback;
|
2751
2906
|
/**
|
2752
|
-
|
2753
|
-
|
2754
|
-
|
2755
|
-
activeTools?: Array<keyof NoInfer<TOOLS>>;
|
2907
|
+
Callback that is called when the LLM response and the final object validation are finished.
|
2908
|
+
*/
|
2909
|
+
onFinish?: StreamObjectOnFinishCallback<RESULT>;
|
2756
2910
|
/**
|
2757
|
-
|
2911
|
+
* Internal. For test use only. May change without notice.
|
2758
2912
|
*/
|
2759
|
-
|
2760
|
-
|
2761
|
-
|
2762
|
-
|
2763
|
-
|
2764
|
-
|
2765
|
-
@param options.stepNumber - The number of the step that is being executed.
|
2766
|
-
@param options.model - The model that is being used.
|
2913
|
+
_internal?: {
|
2914
|
+
generateId?: () => string;
|
2915
|
+
currentDate?: () => Date;
|
2916
|
+
now?: () => number;
|
2917
|
+
};
|
2918
|
+
}): StreamObjectResult<OUTPUT extends 'enum' ? string : OUTPUT extends 'array' ? RESULT : DeepPartial<RESULT>, OUTPUT extends 'array' ? RESULT : RESULT, OUTPUT extends 'array' ? RESULT extends Array<infer U> ? AsyncIterableStream<U> : never : never>;
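A usage sketch for the `streamObject` overloads declared above, here with the `array` output strategy and the `onError`/`onFinish` callbacks. The provider package and model id are assumptions:

```ts
import { streamObject } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package
import { z } from 'zod';

const result = streamObject({
  model: openai('gpt-4o'), // assumed model id
  output: 'array',
  schema: z.object({ name: z.string(), specialty: z.string() }), // element schema
  prompt: 'Generate three fictional chefs.',
  onError({ error }) {
    // Streaming errors are reported here instead of being thrown.
    console.error(error);
  },
  onFinish({ object, usage }) {
    // `object` is undefined if the final value does not match the schema.
    console.log('finished:', object, usage);
  },
});

// With output: 'array', elementStream yields complete, validated elements;
// partialObjectStream would yield unvalidated partial objects instead.
for await (const chef of result.elementStream) {
  console.log(`${chef.name} - ${chef.specialty}`);
}
```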
|
2767
2919
|
|
2768
|
-
|
2769
|
-
|
2770
|
-
|
2771
|
-
|
2920
|
+
/**
|
2921
|
+
* A generated audio file.
|
2922
|
+
*/
|
2923
|
+
interface GeneratedAudioFile extends GeneratedFile {
|
2772
2924
|
/**
|
2773
|
-
|
2925
|
+
* Audio format of the file (e.g., 'mp3', 'wav', etc.)
|
2774
2926
|
*/
|
2775
|
-
|
2776
|
-
|
2777
|
-
Optional stream transformations.
|
2778
|
-
They are applied in the order they are provided.
|
2779
|
-
The stream transformations must maintain the stream structure for streamText to work correctly.
|
2780
|
-
*/
|
2781
|
-
experimental_transform?: StreamTextTransform<TOOLS> | Array<StreamTextTransform<TOOLS>>;
|
2782
|
-
/**
|
2783
|
-
Whether to include raw chunks from the provider in the stream.
|
2784
|
-
When enabled, you will receive raw chunks with type 'raw' that contain the unprocessed data from the provider.
|
2785
|
-
This allows access to cutting-edge provider features not yet wrapped by the AI SDK.
|
2786
|
-
Defaults to false.
|
2787
|
-
*/
|
2788
|
-
includeRawChunks?: boolean;
|
2789
|
-
/**
|
2790
|
-
Callback that is called for each chunk of the stream.
|
2791
|
-
The stream processing will pause until the callback promise is resolved.
|
2792
|
-
*/
|
2793
|
-
onChunk?: StreamTextOnChunkCallback<TOOLS>;
|
2794
|
-
/**
|
2795
|
-
Callback that is invoked when an error occurs during streaming.
|
2796
|
-
You can use it to log errors.
|
2797
|
-
The stream processing will pause until the callback promise is resolved.
|
2798
|
-
*/
|
2799
|
-
onError?: StreamTextOnErrorCallback;
|
2800
|
-
/**
|
2801
|
-
Callback that is called when the LLM response and all request tool executions
|
2802
|
-
(for tools that have an `execute` function) are finished.
|
2803
|
-
|
2804
|
-
The usage is the combined usage of all steps.
|
2805
|
-
*/
|
2806
|
-
onFinish?: StreamTextOnFinishCallback<TOOLS>;
|
2807
|
-
/**
|
2808
|
-
Callback that is called when each step (LLM call) is finished, including intermediate steps.
|
2809
|
-
*/
|
2810
|
-
onStepFinish?: StreamTextOnStepFinishCallback<TOOLS>;
|
2811
|
-
/**
|
2812
|
-
Internal. For test use only. May change without notice.
|
2813
|
-
*/
|
2814
|
-
_internal?: {
|
2815
|
-
now?: () => number;
|
2816
|
-
generateId?: IdGenerator;
|
2817
|
-
currentDate?: () => Date;
|
2818
|
-
};
|
2819
|
-
}): StreamTextResult<TOOLS, PARTIAL_OUTPUT>;
|
2927
|
+
readonly format: string;
|
2928
|
+
}
|
2820
2929
|
|
2821
2930
|
/**
|
2822
|
-
The result of a `
|
2823
|
-
It contains the
|
2931
|
+
The result of a `generateSpeech` call.
|
2932
|
+
It contains the audio data and additional information.
|
2824
2933
|
*/
|
2825
|
-
interface
|
2934
|
+
interface SpeechResult {
|
2826
2935
|
/**
|
2827
|
-
|
2936
|
+
* The audio data as a base64 encoded string or binary data.
|
2828
2937
|
*/
|
2829
|
-
readonly
|
2830
|
-
/**
|
2831
|
-
The images that were generated.
|
2832
|
-
*/
|
2833
|
-
readonly images: Array<GeneratedFile>;
|
2938
|
+
readonly audio: GeneratedAudioFile;
|
2834
2939
|
/**
|
2835
|
-
|
2940
|
+
Warnings for the call, e.g. unsupported settings.
|
2836
2941
|
*/
|
2837
|
-
readonly warnings: Array<
|
2942
|
+
readonly warnings: Array<SpeechWarning>;
|
2838
2943
|
/**
|
2839
|
-
|
2944
|
+
Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
|
2840
2945
|
*/
|
2841
|
-
readonly responses: Array<
|
2946
|
+
readonly responses: Array<SpeechModelResponseMetadata>;
|
2842
2947
|
/**
|
2843
|
-
|
2844
|
-
* results that can be fully encapsulated in the provider.
|
2948
|
+
Provider metadata from the provider.
|
2845
2949
|
*/
|
2846
|
-
readonly providerMetadata:
|
2950
|
+
readonly providerMetadata: Record<string, Record<string, JSONValue$1>>;
|
2847
2951
|
}
|
2848
2952
|
|
2849
2953
|
/**
|
2850
|
-
Generates
|
2954
|
+
Generates speech audio using a speech model.
|
2851
2955
|
|
2852
|
-
@param model - The
|
2853
|
-
@param
|
2854
|
-
@param
|
2855
|
-
@param
|
2856
|
-
@param
|
2857
|
-
@param
|
2956
|
+
@param model - The speech model to use.
|
2957
|
+
@param text - The text to convert to speech.
|
2958
|
+
@param voice - The voice to use for speech generation.
|
2959
|
+
@param outputFormat - The output format to use for speech generation e.g. "mp3", "wav", etc.
|
2960
|
+
@param instructions - Instructions for the speech generation e.g. "Speak in a slow and steady tone".
|
2961
|
+
@param speed - The speed of the speech generation.
|
2858
2962
|
@param providerOptions - Additional provider-specific options that are passed through to the provider
|
2859
2963
|
as body parameters.
|
2860
2964
|
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
|
2861
2965
|
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
2862
2966
|
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
2863
2967
|
|
2864
|
-
@returns A result object that contains the generated
|
2968
|
+
@returns A result object that contains the generated audio data.
|
2865
2969
|
*/
|
2866
|
-
declare function
|
2970
|
+
declare function generateSpeech({ model, text, voice, outputFormat, instructions, speed, language, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
|
2867
2971
|
/**
|
2868
|
-
The
|
2972
|
+
The speech model to use.
|
2869
2973
|
*/
|
2870
|
-
model:
|
2974
|
+
model: SpeechModelV2;
|
2871
2975
|
/**
|
2872
|
-
The
|
2976
|
+
The text to convert to speech.
|
2873
2977
|
*/
|
2874
|
-
|
2978
|
+
text: string;
|
2875
2979
|
/**
|
2876
|
-
|
2980
|
+
The voice to use for speech generation.
|
2877
2981
|
*/
|
2878
|
-
|
2982
|
+
voice?: string;
|
2879
2983
|
/**
|
2880
|
-
|
2984
|
+
* The desired output format for the audio e.g. "mp3", "wav", etc.
|
2881
2985
|
*/
|
2882
|
-
|
2986
|
+
outputFormat?: 'mp3' | 'wav' | (string & {});
|
2883
2987
|
/**
|
2884
|
-
|
2885
|
-
|
2886
|
-
|
2988
|
+
Instructions for the speech generation e.g. "Speak in a slow and steady tone".
|
2989
|
+
*/
|
2990
|
+
instructions?: string;
|
2887
2991
|
/**
|
2888
|
-
|
2992
|
+
The speed of the speech generation.
|
2889
2993
|
*/
|
2890
|
-
|
2994
|
+
speed?: number;
|
2891
2995
|
/**
|
2892
|
-
|
2996
|
+
The language for speech generation. This should be an ISO 639-1 language code (e.g. "en", "es", "fr")
|
2997
|
+
or "auto" for automatic language detection. Provider support varies.
|
2893
2998
|
*/
|
2894
|
-
|
2999
|
+
language?: string;
|
2895
3000
|
/**
|
2896
3001
|
Additional provider-specific options that are passed through to the provider
|
2897
3002
|
as body parameters.
|
@@ -2900,15 +3005,13 @@ declare function generateImage({ model, prompt, n, maxImagesPerCall, size, aspec
|
|
2900
3005
|
record is keyed by the provider-specific metadata key.
|
2901
3006
|
```ts
|
2902
3007
|
{
|
2903
|
-
"openai": {
|
2904
|
-
"style": "vivid"
|
2905
|
-
}
|
3008
|
+
"openai": {}
|
2906
3009
|
}
|
2907
3010
|
```
|
2908
3011
|
*/
|
2909
3012
|
providerOptions?: ProviderOptions;
|
2910
3013
|
/**
|
2911
|
-
Maximum number of retries per
|
3014
|
+
Maximum number of retries per speech model call. Set to 0 to disable retries.
|
2912
3015
|
|
2913
3016
|
@default 2
|
2914
3017
|
*/
|
@@ -2922,1009 +3025,941 @@ declare function generateImage({ model, prompt, n, maxImagesPerCall, size, aspec
|
|
2922
3025
|
Only applicable for HTTP-based providers.
|
2923
3026
|
*/
|
2924
3027
|
headers?: Record<string, string>;
|
2925
|
-
}): Promise<
|
3028
|
+
}): Promise<SpeechResult>;
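A sketch of calling the `generateSpeech` function declared above. The `experimental_generateSpeech` import alias, the `@ai-sdk/openai` provider, the `tts-1` model id, and the `alloy` voice are assumptions that may differ in your setup:

```ts
import { experimental_generateSpeech as generateSpeech } from 'ai'; // exported name may vary by version
import { openai } from '@ai-sdk/openai'; // assumed provider package
import { writeFile } from 'node:fs/promises';

const { audio, warnings } = await generateSpeech({
  model: openai.speech('tts-1'), // assumed speech model id
  text: 'Hello from the AI SDK!',
  voice: 'alloy',                // assumed voice name
  outputFormat: 'mp3',
  instructions: 'Speak in a slow and steady tone.',
});

if (warnings.length > 0) console.warn(warnings);

// GeneratedAudioFile exposes the audio format and the audio bytes.
await writeFile(`speech.${audio.format}`, audio.uint8Array);
```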
|
2926
3029
|
|
2927
3030
|
/**
|
2928
|
-
|
3031
|
+
* Applies default settings for a language model.
|
2929
3032
|
*/
|
2930-2946
|
-
(17 lines removed; their content is not shown in this diff view)
2947
|
-
/**
|
2948
|
-
Additional request information.
|
2949
|
-
*/
|
2950
|
-
readonly request: LanguageModelRequestMetadata;
|
2951
|
-
/**
|
2952
|
-
Additional response information.
|
2953
|
-
*/
|
2954
|
-
readonly response: LanguageModelResponseMetadata & {
|
2955
|
-
/**
|
2956
|
-
Response body (available only for providers that use HTTP requests).
|
2957
|
-
*/
|
2958
|
-
body?: unknown;
|
2959
|
-
};
|
2960
|
-
/**
|
2961
|
-
Additional provider-specific metadata. They are passed through
|
2962
|
-
from the provider to the AI SDK and enable provider-specific
|
2963
|
-
results that can be fully encapsulated in the provider.
|
2964
|
-
*/
|
2965
|
-
readonly providerMetadata: ProviderMetadata | undefined;
|
2966
|
-
/**
|
2967
|
-
Converts the object to a JSON response.
|
2968
|
-
The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
|
2969
|
-
*/
|
2970
|
-
toJsonResponse(init?: ResponseInit): Response;
|
2971
|
-
}
|
3033
|
+
declare function defaultSettingsMiddleware({ settings, }: {
|
3034
|
+
settings: Partial<{
|
3035
|
+
maxOutputTokens?: LanguageModelV2CallOptions['maxOutputTokens'];
|
3036
|
+
temperature?: LanguageModelV2CallOptions['temperature'];
|
3037
|
+
stopSequences?: LanguageModelV2CallOptions['stopSequences'];
|
3038
|
+
topP?: LanguageModelV2CallOptions['topP'];
|
3039
|
+
topK?: LanguageModelV2CallOptions['topK'];
|
3040
|
+
presencePenalty?: LanguageModelV2CallOptions['presencePenalty'];
|
3041
|
+
frequencyPenalty?: LanguageModelV2CallOptions['frequencyPenalty'];
|
3042
|
+
responseFormat?: LanguageModelV2CallOptions['responseFormat'];
|
3043
|
+
seed?: LanguageModelV2CallOptions['seed'];
|
3044
|
+
tools?: LanguageModelV2CallOptions['tools'];
|
3045
|
+
toolChoice?: LanguageModelV2CallOptions['toolChoice'];
|
3046
|
+
headers?: LanguageModelV2CallOptions['headers'];
|
3047
|
+
providerOptions?: LanguageModelV2CallOptions['providerOptions'];
|
3048
|
+
}>;
|
3049
|
+
}): LanguageModelV2Middleware;
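A sketch of combining `defaultSettingsMiddleware` with `wrapLanguageModel` (declared further below). The provider package and model id are assumptions:

```ts
import { wrapLanguageModel, defaultSettingsMiddleware } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

// Calls made through `model` fall back to these settings
// whenever the caller does not specify them.
const model = wrapLanguageModel({
  model: openai('gpt-4o'), // assumed model id
  middleware: defaultSettingsMiddleware({
    settings: {
      temperature: 0.2,
      maxOutputTokens: 1024,
    },
  }),
});
```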
|
2972
3050
|
|
2973
3051
|
/**
|
2974
|
-
|
2975
|
-
|
3052
|
+
* Extracts an XML-tagged reasoning section from the generated text and exposes it
|
3053
|
+
* as a `reasoning` property on the result.
|
3054
|
+
*
|
3055
|
+
* @param tagName - The name of the XML tag to extract reasoning from.
|
3056
|
+
* @param separator - The separator to use between reasoning and text sections.
|
3057
|
+
* @param startWithReasoning - Whether to start with reasoning tokens.
|
3058
|
+
*/
|
3059
|
+
declare function extractReasoningMiddleware({ tagName, separator, startWithReasoning, }: {
|
3060
|
+
tagName: string;
|
3061
|
+
separator?: string;
|
3062
|
+
startWithReasoning?: boolean;
|
3063
|
+
}): LanguageModelV2Middleware;
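A sketch of `extractReasoningMiddleware`, which only has an effect when the underlying model actually emits the configured XML tag. The provider package and model id are assumptions:

```ts
import { generateText, wrapLanguageModel, extractReasoningMiddleware } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider; pick a model that emits <think> tags

const model = wrapLanguageModel({
  model: openai('gpt-4o'), // assumed model id
  middleware: extractReasoningMiddleware({ tagName: 'think' }),
});

const result = await generateText({
  model,
  prompt: 'What is 17 * 23? Think step by step.',
});

console.log(result.reasoning); // content of the <think>...</think> section
console.log(result.text);      // the remaining answer text
```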
|
2976
3064
|
|
2977
|
-
Should return the repaired text or null if the text cannot be repaired.
|
2978
|
-
*/
|
2979
|
-
type RepairTextFunction = (options: {
|
2980
|
-
text: string;
|
2981
|
-
error: JSONParseError | TypeValidationError;
|
2982
|
-
}) => Promise<string | null>;
|
2983
3065
|
/**
|
2984
|
-
|
2985
|
-
|
2986
|
-
|
2987
|
-
|
2988
|
-
@param model - The language model to use.
|
2989
|
-
@param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
|
3066
|
+
* Simulates streaming chunks with the response from a generate call.
|
3067
|
+
*/
|
3068
|
+
declare function simulateStreamingMiddleware(): LanguageModelV2Middleware;
|
2990
3069
|
|
2991
|
-
|
2992
|
-
|
2993
|
-
|
3070
|
+
/**
|
3071
|
+
* Wraps a LanguageModelV2 instance with middleware functionality.
|
3072
|
+
* This function allows you to apply middleware to transform parameters,
|
3073
|
+
* wrap generate operations, and wrap stream operations of a language model.
|
3074
|
+
*
|
3075
|
+
* @param options - Configuration options for wrapping the language model.
|
3076
|
+
* @param options.model - The original LanguageModelV2 instance to be wrapped.
|
3077
|
+
* @param options.middleware - The middleware to be applied to the language model. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.
|
3078
|
+
* @param options.modelId - Optional custom model ID to override the original model's ID.
|
3079
|
+
* @param options.providerId - Optional custom provider ID to override the original model's provider.
|
3080
|
+
* @returns A new LanguageModelV2 instance with middleware applied.
|
3081
|
+
*/
|
3082
|
+
declare const wrapLanguageModel: ({ model, middleware: middlewareArg, modelId, providerId, }: {
|
3083
|
+
model: LanguageModelV2;
|
3084
|
+
middleware: LanguageModelV2Middleware | LanguageModelV2Middleware[];
|
3085
|
+
modelId?: string;
|
3086
|
+
providerId?: string;
|
3087
|
+
}) => LanguageModelV2;
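A sketch of `wrapLanguageModel` with several middlewares, including `simulateStreamingMiddleware` declared above. Per the JSDoc, the first middleware transforms the input first and the last one is wrapped directly around the model. The provider package and model id are assumptions:

```ts
import {
  wrapLanguageModel,
  extractReasoningMiddleware,
  simulateStreamingMiddleware,
} from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

const model = wrapLanguageModel({
  model: openai('gpt-4o'), // assumed model id
  middleware: [
    extractReasoningMiddleware({ tagName: 'think' }),
    simulateStreamingMiddleware(), // serve generate responses as simulated stream chunks
  ],
  modelId: 'gpt-4o-wrapped', // optional override of the reported model id
});
```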
|
2994
3088
|
|
2995-3026
|
-
(32 lines removed; only stray fragments of their content appear in this diff view)
3089
|
+
/**
|
3090
|
+
* Creates a custom provider with specified language models, text embedding models, image models, transcription models, speech models, and an optional fallback provider.
|
3091
|
+
*
|
3092
|
+
* @param {Object} options - The options for creating the custom provider.
|
3093
|
+
* @param {Record<string, LanguageModel>} [options.languageModels] - A record of language models, where keys are model IDs and values are LanguageModel instances.
|
3094
|
+
* @param {Record<string, EmbeddingModel<string>>} [options.textEmbeddingModels] - A record of text embedding models, where keys are model IDs and values are EmbeddingModel<string> instances.
|
3095
|
+
* @param {Record<string, ImageModel>} [options.imageModels] - A record of image models, where keys are model IDs and values are ImageModel instances.
|
3096
|
+
* @param {Record<string, TranscriptionModel>} [options.transcriptionModels] - A record of transcription models, where keys are model IDs and values are TranscriptionModel instances.
|
3097
|
+
* @param {Record<string, SpeechModel>} [options.speechModels] - A record of speech models, where keys are model IDs and values are SpeechModel instances.
|
3098
|
+
* @param {Provider} [options.fallbackProvider] - An optional fallback provider to use when a requested model is not found in the custom provider.
|
3099
|
+
* @returns {Provider} A Provider object with languageModel, textEmbeddingModel, imageModel, transcriptionModel, and speechModel methods.
|
3100
|
+
*
|
3101
|
+
* @throws {NoSuchModelError} Throws when a requested model is not found and no fallback provider is available.
|
3102
|
+
*/
|
3103
|
+
declare function customProvider<LANGUAGE_MODELS extends Record<string, LanguageModelV2>, EMBEDDING_MODELS extends Record<string, EmbeddingModelV2<string>>, IMAGE_MODELS extends Record<string, ImageModelV2>, TRANSCRIPTION_MODELS extends Record<string, TranscriptionModelV2>, SPEECH_MODELS extends Record<string, SpeechModelV2>>({ languageModels, textEmbeddingModels, imageModels, transcriptionModels, speechModels, fallbackProvider, }: {
|
3104
|
+
languageModels?: LANGUAGE_MODELS;
|
3105
|
+
textEmbeddingModels?: EMBEDDING_MODELS;
|
3106
|
+
imageModels?: IMAGE_MODELS;
|
3107
|
+
transcriptionModels?: TRANSCRIPTION_MODELS;
|
3108
|
+
speechModels?: SPEECH_MODELS;
|
3109
|
+
fallbackProvider?: ProviderV2;
|
3110
|
+
}): ProviderV2 & {
|
3111
|
+
languageModel(modelId: ExtractModelId<LANGUAGE_MODELS>): LanguageModelV2;
|
3112
|
+
textEmbeddingModel(modelId: ExtractModelId<EMBEDDING_MODELS>): EmbeddingModelV2<string>;
|
3113
|
+
imageModel(modelId: ExtractModelId<IMAGE_MODELS>): ImageModelV2;
|
3114
|
+
transcriptionModel(modelId: ExtractModelId<TRANSCRIPTION_MODELS>): TranscriptionModelV2;
|
3115
|
+
speechModel(modelId: ExtractModelId<SPEECH_MODELS>): SpeechModelV2;
|
3116
|
+
};
|
3117
|
+
/**
|
3118
|
+
* @deprecated Use `customProvider` instead.
|
3119
|
+
*/
|
3120
|
+
declare const experimental_customProvider: typeof customProvider;
|
3121
|
+
type ExtractModelId<MODELS extends Record<string, unknown>> = Extract<keyof MODELS, string>;
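A sketch of `customProvider` used to alias model ids behind a single provider object; unknown ids are forwarded to the fallback provider instead of throwing `NoSuchModelError`. The provider package and model ids are assumptions:

```ts
import { customProvider } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

export const myProvider = customProvider({
  languageModels: {
    fast: openai('gpt-4o-mini'), // assumed model ids
    smart: openai('gpt-4o'),
  },
  textEmbeddingModels: {
    embeddings: openai.embedding('text-embedding-3-small'),
  },
  fallbackProvider: openai,
});

// Keys are typed via ExtractModelId, so 'fast' | 'smart' autocompletes here.
const model = myProvider.languageModel('fast');
```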
|
3027
3122
|
|
3028
|
-
|
3123
|
+
declare const symbol: unique symbol;
|
3124
|
+
declare class NoSuchProviderError extends NoSuchModelError {
|
3125
|
+
private readonly [symbol];
|
3126
|
+
readonly providerId: string;
|
3127
|
+
readonly availableProviders: string[];
|
3128
|
+
constructor({ modelId, modelType, providerId, availableProviders, message, }: {
|
3129
|
+
modelId: string;
|
3130
|
+
modelType: 'languageModel' | 'textEmbeddingModel' | 'imageModel' | 'transcriptionModel' | 'speechModel';
|
3131
|
+
providerId: string;
|
3132
|
+
availableProviders: string[];
|
3133
|
+
message?: string;
|
3134
|
+
});
|
3135
|
+
static isInstance(error: unknown): error is NoSuchProviderError;
|
3136
|
+
}
|
3029
3137
|
|
3030
|
-
|
3031
|
-
|
3032
|
-
|
3033
|
-
|
3138
|
+
type ExtractLiteralUnion<T> = T extends string ? string extends T ? never : T : never;
|
3139
|
+
interface ProviderRegistryProvider<PROVIDERS extends Record<string, ProviderV2> = Record<string, ProviderV2>, SEPARATOR extends string = ':'> {
|
3140
|
+
languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['languageModel']>>[0]>}` : never): LanguageModelV2;
|
3141
|
+
languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): LanguageModelV2;
|
3142
|
+
textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['textEmbeddingModel']>>[0]>}` : never): EmbeddingModelV2<string>;
|
3143
|
+
textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): EmbeddingModelV2<string>;
|
3144
|
+
imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['imageModel']>>[0]>}` : never): ImageModelV2;
|
3145
|
+
imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): ImageModelV2;
|
3146
|
+
transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['transcriptionModel']>>[0]>}` : never): TranscriptionModelV2;
|
3147
|
+
transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): TranscriptionModelV2;
|
3148
|
+
speechModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['speechModel']>>[0]>}` : never): SpeechModelV2;
|
3149
|
+
speechModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): SpeechModelV2;
|
3150
|
+
}
|
3151
|
+
/**
|
3152
|
+
* Creates a registry for the given providers.
|
3153
|
+
*/
|
3154
|
+
declare function createProviderRegistry<PROVIDERS extends Record<string, ProviderV2>, SEPARATOR extends string = ':'>(providers: PROVIDERS, { separator, }?: {
|
3155
|
+
separator?: SEPARATOR;
|
3156
|
+
}): ProviderRegistryProvider<PROVIDERS, SEPARATOR>;
|
3157
|
+
/**
|
3158
|
+
* @deprecated Use `createProviderRegistry` instead.
|
3159
|
+
*/
|
3160
|
+
declare const experimental_createProviderRegistry: typeof createProviderRegistry;
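A sketch of `createProviderRegistry`, including the optional `separator`. The `@ai-sdk/openai` and `@ai-sdk/anthropic` packages and the model ids are assumptions:

```ts
import { createProviderRegistry } from 'ai';
import { openai } from '@ai-sdk/openai';       // assumed provider packages
import { anthropic } from '@ai-sdk/anthropic';

const registry = createProviderRegistry(
  { openai, anthropic },
  { separator: ':' }, // ':' is the default, shown explicitly
);

// Models are addressed as `<providerKey><separator><modelId>`.
const chatModel = registry.languageModel('anthropic:claude-3-5-sonnet-latest'); // assumed model id
const embeddingModel = registry.textEmbeddingModel('openai:text-embedding-3-small');
```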
|
3034
3161
|
|
3035
|
-
|
3036
|
-
|
3162
|
+
declare function createTextStreamResponse({ status, statusText, headers, textStream, }: ResponseInit & {
|
3163
|
+
textStream: ReadableStream<string>;
|
3164
|
+
}): Response;
|
3037
3165
|
|
3038
|
-
|
3166
|
+
declare function pipeTextStreamToResponse({ response, status, statusText, headers, textStream, }: {
|
3167
|
+
response: ServerResponse;
|
3168
|
+
textStream: ReadableStream<string>;
|
3169
|
+
} & ResponseInit): void;
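A sketch of `createTextStreamResponse` backing a web-standard route handler (the Next.js-style `POST` export, the provider package, and the model id are assumptions). `streamText`'s `textStream` is a `ReadableStream<string>`, so it can be passed directly:

```ts
import { streamText, createTextStreamResponse } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

export async function POST(req: Request) {
  const { prompt } = await req.json();

  const result = streamText({
    model: openai('gpt-4o'), // assumed model id
    prompt,
  });

  // Responds with `text/plain; charset=utf-8`, one chunk per text delta.
  return createTextStreamResponse({
    textStream: result.textStream,
    headers: { 'Cache-Control': 'no-store' },
  });
}
```

In a Node.js server, `pipeTextStreamToResponse({ response, textStream })` writes the same deltas to a `ServerResponse` instead of returning a `Response`.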
|
3039
3170
|
|
3040
|
-
|
3041
|
-
|
3042
|
-
|
3171
|
+
declare const JSONRPCRequestSchema: z.ZodObject<{
|
3172
|
+
jsonrpc: z.ZodLiteral<"2.0">;
|
3173
|
+
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
3174
|
+
method: z.ZodString;
|
3175
|
+
params: z.ZodOptional<z.ZodObject<{
|
3176
|
+
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3177
|
+
}, z.core.$loose>>;
|
3178
|
+
}, z.core.$strict>;
|
3179
|
+
type JSONRPCRequest = z.infer<typeof JSONRPCRequestSchema>;
|
3180
|
+
declare const JSONRPCResponseSchema: z.ZodObject<{
|
3181
|
+
jsonrpc: z.ZodLiteral<"2.0">;
|
3182
|
+
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
3183
|
+
result: z.ZodObject<{
|
3184
|
+
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3185
|
+
}, z.core.$loose>;
|
3186
|
+
}, z.core.$strict>;
|
3187
|
+
type JSONRPCResponse = z.infer<typeof JSONRPCResponseSchema>;
|
3188
|
+
declare const JSONRPCErrorSchema: z.ZodObject<{
|
3189
|
+
jsonrpc: z.ZodLiteral<"2.0">;
|
3190
|
+
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
3191
|
+
error: z.ZodObject<{
|
3192
|
+
code: z.ZodNumber;
|
3193
|
+
message: z.ZodString;
|
3194
|
+
data: z.ZodOptional<z.ZodUnknown>;
|
3195
|
+
}, z.core.$strip>;
|
3196
|
+
}, z.core.$strict>;
|
3197
|
+
type JSONRPCError = z.infer<typeof JSONRPCErrorSchema>;
|
3198
|
+
declare const JSONRPCNotificationSchema: z.ZodObject<{
|
3199
|
+
jsonrpc: z.ZodLiteral<"2.0">;
|
3200
|
+
method: z.ZodString;
|
3201
|
+
params: z.ZodOptional<z.ZodObject<{
|
3202
|
+
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3203
|
+
}, z.core.$loose>>;
|
3204
|
+
}, z.core.$strict>;
|
3205
|
+
type JSONRPCNotification = z.infer<typeof JSONRPCNotificationSchema>;
|
3206
|
+
declare const JSONRPCMessageSchema: z.ZodUnion<readonly [z.ZodObject<{
|
3207
|
+
jsonrpc: z.ZodLiteral<"2.0">;
|
3208
|
+
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
3209
|
+
method: z.ZodString;
|
3210
|
+
params: z.ZodOptional<z.ZodObject<{
|
3211
|
+
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3212
|
+
}, z.core.$loose>>;
|
3213
|
+
}, z.core.$strict>, z.ZodObject<{
|
3214
|
+
jsonrpc: z.ZodLiteral<"2.0">;
|
3215
|
+
method: z.ZodString;
|
3216
|
+
params: z.ZodOptional<z.ZodObject<{
|
3217
|
+
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3218
|
+
}, z.core.$loose>>;
|
3219
|
+
}, z.core.$strict>, z.ZodObject<{
|
3220
|
+
jsonrpc: z.ZodLiteral<"2.0">;
|
3221
|
+
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
3222
|
+
result: z.ZodObject<{
|
3223
|
+
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3224
|
+
}, z.core.$loose>;
|
3225
|
+
}, z.core.$strict>, z.ZodObject<{
|
3226
|
+
jsonrpc: z.ZodLiteral<"2.0">;
|
3227
|
+
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
3228
|
+
error: z.ZodObject<{
|
3229
|
+
code: z.ZodNumber;
|
3230
|
+
message: z.ZodString;
|
3231
|
+
data: z.ZodOptional<z.ZodUnknown>;
|
3232
|
+
}, z.core.$strip>;
|
3233
|
+
}, z.core.$strict>]>;
|
3234
|
+
type JSONRPCMessage = z.infer<typeof JSONRPCMessageSchema>;
|
3043
3235
|
|
3044
|
-
|
3045
|
-
|
3236
|
+
/**
|
3237
|
+
* Transport interface for MCP (Model Context Protocol) communication.
|
3238
|
+
* Maps to the `Transport` interface in the MCP spec.
|
3046
3239
|
*/
|
3047
|
-
|
3048
|
-
/**
|
3049
|
-
The enum values that the model should use.
|
3050
|
-
*/
|
3051
|
-
enum: Array<RESULT>;
|
3052
|
-
mode?: 'json';
|
3053
|
-
output: 'enum';
|
3054
|
-
} : OUTPUT extends 'no-schema' ? {} : {
|
3055
|
-
/**
|
3056
|
-
The schema of the object that the model should generate.
|
3057
|
-
*/
|
3058
|
-
schema: SCHEMA;
|
3240
|
+
interface MCPTransport {
|
3059
3241
|
/**
|
3060
|
-
|
3061
|
-
|
3062
|
-
|
3063
|
-
*/
|
3064
|
-
schemaName?: string;
|
3242
|
+
* Initialize and start the transport
|
3243
|
+
*/
|
3244
|
+
start(): Promise<void>;
|
3065
3245
|
/**
|
3066
|
-
|
3067
|
-
|
3068
|
-
|
3069
|
-
|
3070
|
-
schemaDescription?: string;
|
3246
|
+
* Send a JSON-RPC message through the transport
|
3247
|
+
* @param message The JSON-RPC message to send
|
3248
|
+
*/
|
3249
|
+
send(message: JSONRPCMessage): Promise<void>;
|
3071
3250
|
/**
|
3072
|
-
|
3073
|
-
|
3074
|
-
|
3075
|
-
|
3076
|
-
- 'auto': The provider will choose the best mode for the model.
|
3077
|
-
- 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
|
3078
|
-
- 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
|
3079
|
-
|
3080
|
-
Please note that most providers do not support all modes.
|
3081
|
-
|
3082
|
-
Default and recommended: 'auto' (best mode for the model).
|
3083
|
-
*/
|
3084
|
-
mode?: 'auto' | 'json' | 'tool';
|
3085
|
-
}) & {
|
3086
|
-
output?: OUTPUT;
|
3251
|
+
* Clean up and close the transport
|
3252
|
+
*/
|
3253
|
+
close(): Promise<void>;
|
3087
3254
|
/**
|
3088
|
-
|
3255
|
+
* Event handler for transport closure
|
3089
3256
|
*/
|
3090
|
-
|
3257
|
+
onclose?: () => void;
|
3091
3258
|
/**
|
3092
|
-
|
3093
|
-
to enable JSON parsing.
|
3259
|
+
* Event handler for transport errors
|
3094
3260
|
*/
|
3095
|
-
|
3261
|
+
onerror?: (error: Error) => void;
|
3096
3262
|
/**
|
3097
|
-
|
3098
|
-
|
3099
|
-
|
3263
|
+
* Event handler for received messages
|
3264
|
+
*/
|
3265
|
+
onmessage?: (message: JSONRPCMessage) => void;
|
3266
|
+
}
|
3267
|
+
type MCPTransportConfig = {
|
3268
|
+
type: 'sse';
|
3100
3269
|
/**
|
3101
|
-
|
3102
|
-
|
3103
|
-
|
3104
|
-
*/
|
3105
|
-
providerOptions?: ProviderOptions;
|
3270
|
+
* The URL of the MCP server.
|
3271
|
+
*/
|
3272
|
+
url: string;
|
3106
3273
|
/**
|
3107
|
-
*
|
3274
|
+
* Additional HTTP headers to be sent with requests.
|
3108
3275
|
*/
|
3109
|
-
|
3110
|
-
|
3111
|
-
|
3112
|
-
|
3113
|
-
|
3276
|
+
headers?: Record<string, string>;
|
3277
|
+
};
|
3278
|
+
|
3279
|
+
type ToolSchemas = Record<string, {
|
3280
|
+
inputSchema: FlexibleSchema<JSONObject | unknown>;
|
3281
|
+
}> | 'automatic' | undefined;
|
3282
|
+
type MappedTool<T extends Tool | JSONObject, OUTPUT extends any> = T extends Tool<infer INPUT> ? Tool<INPUT, OUTPUT> : T extends JSONObject ? Tool<T, OUTPUT> : never;
|
3283
|
+
type McpToolSet<TOOL_SCHEMAS extends ToolSchemas = 'automatic'> = TOOL_SCHEMAS extends Record<string, {
|
3284
|
+
inputSchema: FlexibleSchema<unknown>;
|
3285
|
+
}> ? {
|
3286
|
+
[K in keyof TOOL_SCHEMAS]: MappedTool<TOOL_SCHEMAS[K], CallToolResult> & Required<Pick<MappedTool<TOOL_SCHEMAS[K], CallToolResult>, 'execute'>>;
|
3287
|
+
} : McpToolSet<Record<string, {
|
3288
|
+
inputSchema: FlexibleSchema<unknown>;
|
3289
|
+
}>>;
|
3290
|
+
declare const CallToolResultSchema: z.ZodUnion<[z.ZodObject<{
|
3291
|
+
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3292
|
+
content: z.ZodArray<z.ZodUnion<readonly [z.ZodObject<{
|
3293
|
+
type: z.ZodLiteral<"text">;
|
3294
|
+
text: z.ZodString;
|
3295
|
+
}, z.core.$loose>, z.ZodObject<{
|
3296
|
+
type: z.ZodLiteral<"image">;
|
3297
|
+
data: z.ZodString;
|
3298
|
+
mimeType: z.ZodString;
|
3299
|
+
}, z.core.$loose>, z.ZodObject<{
|
3300
|
+
type: z.ZodLiteral<"resource">;
|
3301
|
+
resource: z.ZodUnion<readonly [z.ZodObject<{
|
3302
|
+
uri: z.ZodString;
|
3303
|
+
mimeType: z.ZodOptional<z.ZodString>;
|
3304
|
+
text: z.ZodString;
|
3305
|
+
}, z.core.$loose>, z.ZodObject<{
|
3306
|
+
uri: z.ZodString;
|
3307
|
+
mimeType: z.ZodOptional<z.ZodString>;
|
3308
|
+
blob: z.ZodString;
|
3309
|
+
}, z.core.$loose>]>;
|
3310
|
+
}, z.core.$loose>]>>;
|
3311
|
+
isError: z.ZodOptional<z.ZodDefault<z.ZodBoolean>>;
|
3312
|
+
}, z.core.$loose>, z.ZodObject<{
|
3313
|
+
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3314
|
+
toolResult: z.ZodUnknown;
|
3315
|
+
}, z.core.$loose>]>;
|
3316
|
+
type CallToolResult = z.infer<typeof CallToolResultSchema>;
|
3114
3317
|
|
3318
|
+
interface MCPClientConfig {
|
3319
|
+
/** Transport configuration for connecting to the MCP server */
|
3320
|
+
transport: MCPTransportConfig | MCPTransport;
|
3321
|
+
/** Optional callback for uncaught errors */
|
3322
|
+
onUncaughtError?: (error: unknown) => void;
|
3323
|
+
/** Optional client name, defaults to 'ai-sdk-mcp-client' */
|
3324
|
+
name?: string;
|
3325
|
+
}
|
3326
|
+
declare function createMCPClient(config: MCPClientConfig): Promise<MCPClient>;
|
3115
3327
|
/**
|
3116
|
-
|
3328
|
+
* A lightweight MCP Client implementation
|
3329
|
+
*
|
3330
|
+
* The primary purpose of this client is tool conversion between MCP<>AI SDK
|
3331
|
+
* but can later be extended to support other MCP features
|
3332
|
+
*
|
3333
|
+
* Tool parameters are automatically inferred from the server's JSON schema
|
3334
|
+
* if not explicitly provided in the tools configuration
|
3335
|
+
*
|
3336
|
+
* This client is meant to be used to communicate with a single server. To communicate and fetch tools across multiple servers, it's recommended to create a new client instance per server.
|
3337
|
+
*
|
3338
|
+
* Not supported:
|
3339
|
+
* - Client options (e.g. sampling, roots) as they are not needed for tool conversion
|
3340
|
+
* - Accepting notifications
|
3341
|
+
* - Session management (when passing a sessionId to an instance of the Streamable HTTP transport)
|
3342
|
+
* - Resumable SSE streams
|
3117
3343
|
*/
|
3118
|
-
|
3119
|
-
|
3120
|
-
|
3121
|
-
|
3122
|
-
|
3123
|
-
|
3124
|
-
|
3125
|
-
|
3126
|
-
|
3344
|
+
declare class MCPClient {
|
3345
|
+
private transport;
|
3346
|
+
private onUncaughtError?;
|
3347
|
+
private clientInfo;
|
3348
|
+
private requestMessageId;
|
3349
|
+
private responseHandlers;
|
3350
|
+
private serverCapabilities;
|
3351
|
+
private isClosed;
|
3352
|
+
constructor({ transport: transportConfig, name, onUncaughtError, }: MCPClientConfig);
|
3353
|
+
init(): Promise<this>;
|
3354
|
+
close(): Promise<void>;
|
3355
|
+
private assertCapability;
|
3356
|
+
private request;
|
3357
|
+
private listTools;
|
3358
|
+
private callTool;
|
3359
|
+
private notification;
|
3127
3360
|
/**
|
3128
|
-
|
3129
|
-
|
3130
|
-
results that can be fully encapsulated in the provider.
|
3361
|
+
* Returns a set of AI SDK tools from the MCP server
|
3362
|
+
* @returns A record of tool names to their implementations
|
3131
3363
|
*/
|
3132
|
-
|
3133
|
-
|
3134
|
-
|
3135
|
-
|
3136
|
-
|
3137
|
-
|
3138
|
-
|
3139
|
-
|
3140
|
-
|
3364
|
+
tools<TOOL_SCHEMAS extends ToolSchemas = 'automatic'>({ schemas, }?: {
|
3365
|
+
schemas?: TOOL_SCHEMAS;
|
3366
|
+
}): Promise<McpToolSet<TOOL_SCHEMAS>>;
|
3367
|
+
private onClose;
|
3368
|
+
private onError;
|
3369
|
+
private onResponse;
|
3370
|
+
}
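A sketch of the MCP client declared above, using the SSE transport config and automatic tool schema discovery. The `experimental_createMCPClient` import alias, the server URL, the provider package, and the model id are assumptions:

```ts
import { experimental_createMCPClient as createMCPClient, generateText } from 'ai'; // exported name may vary by version
import { openai } from '@ai-sdk/openai'; // assumed provider package

const mcpClient = await createMCPClient({
  transport: {
    type: 'sse',
    url: 'https://example.com/mcp/sse',          // assumed server URL
    headers: { Authorization: 'Bearer <token>' }, // placeholder credentials
  },
});

try {
  // 'automatic' schema discovery: tool input schemas come from the server.
  const tools = await mcpClient.tools();

  const { text } = await generateText({
    model: openai('gpt-4o'), // assumed model id
    tools,
    prompt: 'Use the available tools to summarize my open tickets.',
  });

  console.log(text);
} finally {
  // One client per server; close it when the conversation is done.
  await mcpClient.close();
}
```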
|
3371
|
+
|
3372
|
+
/**
|
3373
|
+
The result of a `transcribe` call.
|
3374
|
+
It contains the transcript and additional information.
|
3375
|
+
*/
|
3376
|
+
interface TranscriptionResult {
|
3141
3377
|
/**
|
3142
|
-
|
3143
|
-
|
3144
|
-
readonly
|
3378
|
+
* The complete transcribed text from the audio.
|
3379
|
+
*/
|
3380
|
+
readonly text: string;
|
3145
3381
|
/**
|
3146
|
-
|
3147
|
-
|
3148
|
-
|
3149
|
-
|
3150
|
-
|
3151
|
-
|
3382
|
+
* Array of transcript segments with timing information.
|
3383
|
+
* Each segment represents a portion of the transcribed text with start and end times.
|
3384
|
+
*/
|
3385
|
+
readonly segments: Array<{
|
3386
|
+
/**
|
3387
|
+
* The text content of this segment.
|
3388
|
+
*/
|
3389
|
+
readonly text: string;
|
3390
|
+
/**
|
3391
|
+
* The start time of this segment in seconds.
|
3392
|
+
*/
|
3393
|
+
readonly startSecond: number;
|
3394
|
+
/**
|
3395
|
+
* The end time of this segment in seconds.
|
3396
|
+
*/
|
3397
|
+
readonly endSecond: number;
|
3398
|
+
}>;
|
3152
3399
|
/**
|
3153
|
-
*
|
3400
|
+
* The detected language of the audio content, as an ISO-639-1 code (e.g., 'en' for English).
|
3401
|
+
* May be undefined if the language couldn't be detected.
|
3154
3402
|
*/
|
3155
|
-
readonly
|
3403
|
+
readonly language: string | undefined;
|
3156
3404
|
/**
|
3157
|
-
|
3158
|
-
|
3159
|
-
|
3160
|
-
readonly
|
3405
|
+
* The total duration of the audio file in seconds.
|
3406
|
+
* May be undefined if the duration couldn't be determined.
|
3407
|
+
*/
|
3408
|
+
readonly durationInSeconds: number | undefined;
|
3161
3409
|
/**
|
3162
|
-
|
3163
|
-
Only errors that stop the stream, such as network errors, are thrown.
|
3410
|
+
Warnings for the call, e.g. unsupported settings.
|
3164
3411
|
*/
|
3165
|
-
readonly
|
3412
|
+
readonly warnings: Array<TranscriptionWarning>;
|
3166
3413
|
/**
|
3167
|
-
|
3168
|
-
|
3169
|
-
|
3170
|
-
|
3171
|
-
@param response A Node.js response-like object (ServerResponse).
|
3172
|
-
@param init Optional headers, status code, and status text.
|
3173
|
-
*/
|
3174
|
-
pipeTextStreamToResponse(response: ServerResponse$1, init?: ResponseInit): void;
|
3414
|
+
Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
|
3415
|
+
*/
|
3416
|
+
readonly responses: Array<TranscriptionModelResponseMetadata>;
|
3175
3417
|
/**
|
3176
|
-
|
3177
|
-
|
3178
|
-
|
3179
|
-
Non-text-delta events are ignored.
|
3180
|
-
|
3181
|
-
@param init Optional headers, status code, and status text.
|
3182
|
-
*/
|
3183
|
-
toTextStreamResponse(init?: ResponseInit): Response;
|
3418
|
+
Provider metadata from the provider.
|
3419
|
+
*/
|
3420
|
+
readonly providerMetadata: Record<string, Record<string, JSONValue$1>>;
|
3184
3421
|
}
|
3185
|
-
type ObjectStreamPart<PARTIAL> = {
|
3186
|
-
type: 'object';
|
3187
|
-
object: PARTIAL;
|
3188
|
-
} | {
|
3189
|
-
type: 'text-delta';
|
3190
|
-
textDelta: string;
|
3191
|
-
} | {
|
3192
|
-
type: 'error';
|
3193
|
-
error: unknown;
|
3194
|
-
} | {
|
3195
|
-
type: 'finish';
|
3196
|
-
finishReason: FinishReason;
|
3197
|
-
usage: LanguageModelUsage;
|
3198
|
-
response: LanguageModelResponseMetadata;
|
3199
|
-
providerMetadata?: ProviderMetadata;
|
3200
|
-
};
|
3201
3422
|
|
3202
3423
|
/**
|
3203
|
-
|
3424
|
+
Generates transcripts using a transcription model.
|
3204
3425
|
|
3205
|
-
@param
|
3206
|
-
|
3207
|
-
|
3208
|
-
|
3209
|
-
|
3210
|
-
|
3211
|
-
|
3426
|
+
@param model - The transcription model to use.
|
3427
|
+
@param audio - The audio data to transcribe as DataContent (string | Uint8Array | ArrayBuffer | Buffer) or a URL.
|
3428
|
+
@param providerOptions - Additional provider-specific options that are passed through to the provider
|
3429
|
+
as body parameters.
|
3430
|
+
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
|
3431
|
+
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
3432
|
+
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
3212
3433
|
|
3213
|
-
@
|
3434
|
+
@returns A result object that contains the generated transcript.
|
3214
3435
|
*/
|
3215
|
-
|
3216
|
-
/**
|
3217
|
-
The token usage of the generated response.
|
3218
|
-
*/
|
3219
|
-
usage: LanguageModelUsage;
|
3436
|
+
declare function transcribe({ model, audio, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
|
3220
3437
|
/**
|
3221
|
-
The
|
3222
|
-
|
3223
|
-
|
3438
|
+
The transcription model to use.
|
3439
|
+
*/
|
3440
|
+
model: TranscriptionModelV2;
|
3224
3441
|
/**
|
3225
|
-
|
3226
|
-
|
3227
|
-
|
3442
|
+
The audio data to transcribe.
|
3443
|
+
*/
|
3444
|
+
audio: DataContent | URL;
|
3228
3445
|
/**
|
3229
|
-
|
3230
|
-
|
3231
|
-
|
3446
|
+
Additional provider-specific options that are passed through to the provider
|
3447
|
+
as body parameters.
|
3448
|
+
|
3449
|
+
The outer record is keyed by the provider name, and the inner
|
3450
|
+
record is keyed by the provider-specific metadata key.
|
3451
|
+
```ts
|
3452
|
+
{
|
3453
|
+
"openai": {
|
3454
|
+
"temperature": 0
|
3455
|
+
}
|
3456
|
+
}
|
3457
|
+
```
|
3458
|
+
*/
|
3459
|
+
providerOptions?: ProviderOptions;
|
3232
3460
|
/**
|
3233
|
-
|
3234
|
-
|
3235
|
-
|
3236
|
-
|
3237
|
-
|
3238
|
-
to the provider from the AI SDK and enable provider-specific
|
3239
|
-
functionality that can be fully encapsulated in the provider.
|
3240
|
-
*/
|
3241
|
-
providerMetadata: ProviderMetadata | undefined;
|
3242
|
-
}) => Promise<void> | void;
|
3243
|
-
/**
|
3244
|
-
Generate a structured, typed object for a given prompt and schema using a language model.
|
3245
|
-
|
3246
|
-
This function streams the output. If you do not want to stream the output, use `generateObject` instead.
|
3247
|
-
|
3248
|
-
@param model - The language model to use.
|
3249
|
-
@param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
|
3250
|
-
|
3251
|
-
@param system - A system message that will be part of the prompt.
|
3252
|
-
@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
|
3253
|
-
@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
|
3254
|
-
|
3255
|
-
@param maxOutputTokens - Maximum number of tokens to generate.
|
3256
|
-
@param temperature - Temperature setting.
|
3257
|
-
The value is passed through to the provider. The range depends on the provider and model.
|
3258
|
-
It is recommended to set either `temperature` or `topP`, but not both.
|
3259
|
-
@param topP - Nucleus sampling.
|
3260
|
-
The value is passed through to the provider. The range depends on the provider and model.
|
3261
|
-
It is recommended to set either `temperature` or `topP`, but not both.
|
3262
|
-
@param topK - Only sample from the top K options for each subsequent token.
|
3263
|
-
Used to remove "long tail" low probability responses.
|
3264
|
-
Recommended for advanced use cases only. You usually only need to use temperature.
|
3265
|
-
@param presencePenalty - Presence penalty setting.
|
3266
|
-
It affects the likelihood of the model to repeat information that is already in the prompt.
|
3267
|
-
The value is passed through to the provider. The range depends on the provider and model.
|
3268
|
-
@param frequencyPenalty - Frequency penalty setting.
|
3269
|
-
It affects the likelihood of the model to repeatedly use the same words or phrases.
|
3270
|
-
The value is passed through to the provider. The range depends on the provider and model.
|
3271
|
-
@param stopSequences - Stop sequences.
|
3272
|
-
If set, the model will stop generating text when one of the stop sequences is generated.
|
3273
|
-
@param seed - The seed (integer) to use for random sampling.
|
3274
|
-
If set and supported by the model, calls will generate deterministic results.
|
3275
|
-
|
3276
|
-
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
|
3277
|
-
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
3278
|
-
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
3279
|
-
|
3280
|
-
@param schema - The schema of the object that the model should generate.
|
3281
|
-
@param schemaName - Optional name of the output that should be generated.
|
3282
|
-
Used by some providers for additional LLM guidance, e.g.
|
3283
|
-
via tool or schema name.
|
3284
|
-
@param schemaDescription - Optional description of the output that should be generated.
|
3285
|
-
Used by some providers for additional LLM guidance, e.g.
|
3286
|
-
via tool or schema description.
|
3287
|
-
|
3288
|
-
@param output - The type of the output.
|
3289
|
-
|
3290
|
-
- 'object': The output is an object.
|
3291
|
-
- 'array': The output is an array.
|
3292
|
-
- 'enum': The output is an enum.
|
3293
|
-
- 'no-schema': The output is not a schema.
|
3294
|
-
|
3295
|
-
@param experimental_telemetry - Optional telemetry configuration (experimental).
|
3296
|
-
|
3297
|
-
@param providerOptions - Additional provider-specific options. They are passed through
|
3298
|
-
to the provider from the AI SDK and enable provider-specific
|
3299
|
-
functionality that can be fully encapsulated in the provider.
|
3300
|
-
|
3301
|
-
@returns
|
3302
|
-
A result object for accessing the partial object stream and additional information.
|
3303
|
-
*/
|
3304
|
-
declare function streamObject<SCHEMA extends z3.Schema | z4$1.ZodType | Schema = z4$1.ZodType<JSONValue$1>, OUTPUT extends 'object' | 'array' | 'enum' | 'no-schema' = InferSchema<SCHEMA> extends string ? 'enum' : 'object', RESULT = OUTPUT extends 'array' ? Array<InferSchema<SCHEMA>> : InferSchema<SCHEMA>>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (OUTPUT extends 'enum' ? {
|
3305
|
-
/**
|
3306
|
-
The enum values that the model should use.
|
3307
|
-
*/
|
3308
|
-
enum: Array<RESULT>;
|
3309
|
-
mode?: 'json';
|
3310
|
-
output: 'enum';
|
3311
|
-
} : OUTPUT extends 'no-schema' ? {} : {
|
3312
|
-
/**
|
3313
|
-
The schema of the object that the model should generate.
|
3314
|
-
*/
|
3315
|
-
schema: SCHEMA;
|
3316
|
-
/**
|
3317
|
-
Optional name of the output that should be generated.
|
3318
|
-
Used by some providers for additional LLM guidance, e.g.
|
3319
|
-
via tool or schema name.
|
3320
|
-
*/
|
3321
|
-
schemaName?: string;
|
3461
|
+
Maximum number of retries per transcript model call. Set to 0 to disable retries.
|
3462
|
+
|
3463
|
+
@default 2
|
3464
|
+
*/
|
3465
|
+
maxRetries?: number;
|
3322
3466
|
/**
|
3323
|
-
|
3324
|
-
|
3325
|
-
|
3326
|
-
*/
|
3327
|
-
schemaDescription?: string;
|
3467
|
+
Abort signal.
|
3468
|
+
*/
|
3469
|
+
abortSignal?: AbortSignal;
|
3328
3470
|
/**
|
3329
|
-
|
3330
|
-
|
3331
|
-
|
3332
|
-
|
3333
|
-
|
3334
|
-
- 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
|
3335
|
-
- 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
|
3471
|
+
Additional headers to include in the request.
|
3472
|
+
Only applicable for HTTP-based providers.
|
3473
|
+
*/
|
3474
|
+
headers?: Record<string, string>;
|
3475
|
+
}): Promise<TranscriptionResult>;
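The retry, abort, and header options added above slot into the existing `experimental_transcribe` export. A minimal usage sketch; the OpenAI provider package, the `whisper-1` model id, and the audio file path are illustrative assumptions, not part of this diff:

```ts
import { experimental_transcribe as transcribe } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package
import { readFile } from 'node:fs/promises';

const controller = new AbortController();

const result = await transcribe({
  model: openai.transcription('whisper-1'), // assumed model id
  audio: await readFile('meeting.mp3'),
  maxRetries: 0,                            // disable retries (default is 2)
  abortSignal: controller.signal,           // cancel the call if needed
  headers: { 'x-request-id': 'demo-123' },  // only sent by HTTP-based providers
});

console.log(result.text);
```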
|
3336
3476
|
|
3337
|
-
|
3477
|
+
declare const getOriginalFetch: () => typeof fetch;
|
3478
|
+
declare function callCompletionApi({ api, prompt, credentials, headers, body, streamProtocol, setCompletion, setLoading, setError, setAbortController, onFinish, onError, fetch, }: {
|
3479
|
+
api: string;
|
3480
|
+
prompt: string;
|
3481
|
+
credentials: RequestCredentials | undefined;
|
3482
|
+
headers: HeadersInit | undefined;
|
3483
|
+
body: Record<string, any>;
|
3484
|
+
streamProtocol: 'data' | 'text' | undefined;
|
3485
|
+
setCompletion: (completion: string) => void;
|
3486
|
+
setLoading: (loading: boolean) => void;
|
3487
|
+
setError: (error: Error | undefined) => void;
|
3488
|
+
setAbortController: (abortController: AbortController | null) => void;
|
3489
|
+
onFinish: ((prompt: string, completion: string) => void) | undefined;
|
3490
|
+
onError: ((error: Error) => void) | undefined;
|
3491
|
+
fetch: ReturnType<typeof getOriginalFetch> | undefined;
|
3492
|
+
}): Promise<string | null | undefined>;
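`callCompletionApi` is the low-level helper that the `useCompletion` hooks build on; every property in its options object is declared (not optional), so unused ones are passed explicitly as `undefined`. A sketch under those assumptions; the endpoint and body values are illustrative:

```ts
import { callCompletionApi } from 'ai';

let completion = '';

const finalText = await callCompletionApi({
  api: '/api/completion',                  // assumed endpoint
  prompt: 'Write a haiku about TypeScript.',
  credentials: undefined,
  headers: { 'x-demo': '1' },
  body: { sessionId: '123' },
  streamProtocol: 'data',
  setCompletion: text => { completion = text; },
  setLoading: loading => console.log('loading:', loading),
  setError: error => { if (error) console.error(error); },
  setAbortController: () => {},
  onFinish: (prompt, text) => console.log(`finished: ${text.length} chars for "${prompt}"`),
  onError: error => console.error(error),
  fetch: undefined,                        // falls back to the global fetch
});

console.log(finalText ?? completion);
```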
|
3338
3493
|
|
3339
|
-
|
3340
|
-
*/
|
3341
|
-
mode?: 'auto' | 'json' | 'tool';
|
3342
|
-
}) & {
|
3343
|
-
output?: OUTPUT;
|
3344
|
-
/**
|
3345
|
-
The language model to use.
|
3346
|
-
*/
|
3347
|
-
model: LanguageModel;
|
3494
|
+
interface UIMessageStreamWriter<UI_MESSAGE extends UIMessage = UIMessage> {
|
3348
3495
|
/**
|
3349
|
-
|
3496
|
+
* Appends a data stream part to the stream.
|
3350
3497
|
*/
|
3351
|
-
|
3352
|
-
/**
|
3353
|
-
Additional provider-specific options. They are passed through
|
3354
|
-
to the provider from the AI SDK and enable provider-specific
|
3355
|
-
functionality that can be fully encapsulated in the provider.
|
3356
|
-
*/
|
3357
|
-
providerOptions?: ProviderOptions;
|
3358
|
-
/**
|
3359
|
-
Callback that is invoked when an error occurs during streaming.
|
3360
|
-
You can use it to log errors.
|
3361
|
-
The stream processing will pause until the callback promise is resolved.
|
3362
|
-
*/
|
3363
|
-
onError?: StreamObjectOnErrorCallback;
|
3364
|
-
/**
|
3365
|
-
Callback that is called when the LLM response and the final object validation are finished.
|
3366
|
-
*/
|
3367
|
-
onFinish?: StreamObjectOnFinishCallback<RESULT>;
|
3498
|
+
write(part: InferUIMessageChunk<UI_MESSAGE>): void;
|
3368
3499
|
/**
|
3369
|
-
*
|
3500
|
+
* Merges the contents of another stream to this stream.
|
3370
3501
|
*/
|
3371
|
-
|
3372
|
-
generateId?: () => string;
|
3373
|
-
currentDate?: () => Date;
|
3374
|
-
now?: () => number;
|
3375
|
-
};
|
3376
|
-
}): StreamObjectResult<OUTPUT extends 'enum' ? string : OUTPUT extends 'array' ? RESULT : DeepPartial<RESULT>, OUTPUT extends 'array' ? RESULT : RESULT, OUTPUT extends 'array' ? RESULT extends Array<infer U> ? AsyncIterableStream<U> : never : never>;
|
3377
|
-
|
3378
|
-
/**
|
3379
|
-
* A generated audio file.
|
3380
|
-
*/
|
3381
|
-
interface GeneratedAudioFile extends GeneratedFile {
|
3502
|
+
merge(stream: ReadableStream<InferUIMessageChunk<UI_MESSAGE>>): void;
|
3382
3503
|
/**
|
3383
|
-
*
|
3504
|
+
* Error handler that is used by the data stream writer.
|
3505
|
+
* This is intended for forwarding when merging streams
|
3506
|
+
* to prevent duplicated error masking.
|
3384
3507
|
*/
|
3385
|
-
|
3508
|
+
onError: ErrorHandler | undefined;
|
3386
3509
|
}
|
3387
3510
|
|
3388
|
-
|
3389
|
-
|
3390
|
-
|
3391
|
-
|
3392
|
-
|
3393
|
-
/**
|
3394
|
-
* The audio data as a base64 encoded string or binary data.
|
3395
|
-
*/
|
3396
|
-
readonly audio: GeneratedAudioFile;
|
3397
|
-
/**
|
3398
|
-
Warnings for the call, e.g. unsupported settings.
|
3399
|
-
*/
|
3400
|
-
readonly warnings: Array<SpeechWarning>;
|
3401
|
-
/**
|
3402
|
-
Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
|
3403
|
-
*/
|
3404
|
-
readonly responses: Array<SpeechModelResponseMetadata>;
|
3511
|
+
declare function createUIMessageStream<UI_MESSAGE extends UIMessage>({ execute, onError, originalMessages, onFinish, generateId, }: {
|
3512
|
+
execute: (options: {
|
3513
|
+
writer: UIMessageStreamWriter<UI_MESSAGE>;
|
3514
|
+
}) => Promise<void> | void;
|
3515
|
+
onError?: (error: unknown) => string;
|
3405
3516
|
/**
|
3406
|
-
|
3517
|
+
* The original messages. If they are provided, persistence mode is assumed,
|
3518
|
+
* and a message ID is provided for the response message.
|
3407
3519
|
*/
|
3408
|
-
|
3520
|
+
originalMessages?: UI_MESSAGE[];
|
3521
|
+
onFinish?: (options: {
|
3522
|
+
/**
|
3523
|
+
* The updated list of UI messages.
|
3524
|
+
*/
|
3525
|
+
messages: UI_MESSAGE[];
|
3526
|
+
/**
|
3527
|
+
* Indicates whether the response message is a continuation of the last original message,
|
3528
|
+
* or if a new message was created.
|
3529
|
+
*/
|
3530
|
+
isContinuation: boolean;
|
3531
|
+
/**
|
3532
|
+
* The message that was sent to the client as a response
|
3533
|
+
* (including the original message if it was extended).
|
3534
|
+
*/
|
3535
|
+
responseMessage: UI_MESSAGE;
|
3536
|
+
}) => void;
|
3537
|
+
generateId?: IdGenerator;
|
3538
|
+
}): ReadableStream<InferUIMessageChunk<UI_MESSAGE>>;
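A minimal sketch of `createUIMessageStream` together with the `UIMessageStreamWriter` declared above; the `data-status` part name is made up for illustration and would normally correspond to a data part type declared on your `UIMessage`:

```ts
import { createUIMessageStream, type UIMessage } from 'ai';

const stream = createUIMessageStream<UIMessage>({
  execute: async ({ writer }) => {
    // Append a custom data part to the stream.
    writer.write({ type: 'data-status', id: 'status-1', data: { state: 'thinking' } });
    // writer.merge(otherChunkStream); // forward chunks from another UI message stream
  },
  onError: error => `Stream error: ${String(error)}`,
  onFinish: ({ messages, isContinuation }) => {
    console.log('messages:', messages.length, 'continuation:', isContinuation);
  },
});
```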
|
3539
|
+
|
3540
|
+
declare function createUIMessageStreamResponse({ status, statusText, headers, stream, consumeSseStream, }: UIMessageStreamResponseInit & {
|
3541
|
+
stream: ReadableStream<UIMessageChunk>;
|
3542
|
+
}): Response;
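For fetch-style servers the stream can be returned directly via `createUIMessageStreamResponse`; a short sketch, assuming a Next.js-style route handler and the same illustrative `data-status` part:

```ts
import {
  createUIMessageStream,
  createUIMessageStreamResponse,
  type UIMessage,
} from 'ai';

export function POST(): Response {
  const stream = createUIMessageStream<UIMessage>({
    execute: ({ writer }) => {
      writer.write({ type: 'data-status', id: 'status-1', data: { state: 'done' } });
    },
  });

  return createUIMessageStreamResponse({ stream, status: 200 });
}
```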
|
3543
|
+
|
3544
|
+
declare class JsonToSseTransformStream extends TransformStream<unknown, string> {
|
3545
|
+
constructor();
|
3409
3546
|
}
|
3410
3547
|
|
3548
|
+
declare function pipeUIMessageStreamToResponse({ response, status, statusText, headers, stream, consumeSseStream, }: {
|
3549
|
+
response: ServerResponse;
|
3550
|
+
stream: ReadableStream<UIMessageChunk>;
|
3551
|
+
} & UIMessageStreamResponseInit): void;
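`pipeUIMessageStreamToResponse` is the Node.js `ServerResponse` counterpart; a sketch assuming a plain `node:http` server (an Express-style `res` works the same way):

```ts
import { createServer } from 'node:http';
import {
  createUIMessageStream,
  pipeUIMessageStreamToResponse,
  type UIMessage,
} from 'ai';

createServer((_req, res) => {
  const stream = createUIMessageStream<UIMessage>({
    execute: ({ writer }) => {
      writer.write({ type: 'data-status', id: 'status-1', data: { state: 'done' } });
    },
  });

  pipeUIMessageStreamToResponse({ response: res, stream, status: 200 });
}).listen(3000);
```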
|
3552
|
+
|
3411
3553
|
/**
|
3412
|
-
|
3554
|
+
* Transforms a stream of `UIMessageChunk`s into an `AsyncIterableStream` of `UIMessage`s.
|
3555
|
+
*
|
3556
|
+
* @param options.message - The last assistant message to use as a starting point when the conversation is resumed. Otherwise undefined.
|
3557
|
+
* @param options.stream - The stream of `UIMessageChunk`s to read.
|
3558
|
+
*
|
3559
|
+
* @returns An `AsyncIterableStream` of `UIMessage`s. Each stream part is a different state of the same message
|
3560
|
+
* as it is being completed.
|
3561
|
+
*/
|
3562
|
+
declare function readUIMessageStream<UI_MESSAGE extends UIMessage>({ message, stream, }: {
|
3563
|
+
message?: UI_MESSAGE;
|
3564
|
+
stream: ReadableStream<UIMessageChunk>;
|
3565
|
+
}): AsyncIterableStream<UI_MESSAGE>;
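`readUIMessageStream` yields successive snapshots of the same message as chunks arrive; a sketch, assuming you already have a `ReadableStream<UIMessageChunk>` (for example from a transport):

```ts
import { readUIMessageStream, type UIMessage, type UIMessageChunk } from 'ai';

async function logMessageStates(chunkStream: ReadableStream<UIMessageChunk>) {
  for await (const message of readUIMessageStream<UIMessage>({ stream: chunkStream })) {
    // Each iteration is a more complete state of the same assistant message.
    console.log(message.id, message.parts.length);
  }
}
```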
|
3413
3566
|
|
3414
|
-
|
3415
|
-
|
3416
|
-
|
3417
|
-
|
3418
|
-
|
3419
|
-
|
3420
|
-
@param providerOptions - Additional provider-specific options that are passed through to the provider
|
3421
|
-
as body parameters.
|
3422
|
-
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
|
3423
|
-
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
3424
|
-
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
3425
|
-
|
3426
|
-
@returns A result object that contains the generated audio data.
|
3427
|
-
*/
|
3428
|
-
declare function generateSpeech({ model, text, voice, outputFormat, instructions, speed, language, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
|
3429
|
-
/**
|
3430
|
-
The speech model to use.
|
3431
|
-
*/
|
3432
|
-
model: SpeechModelV2;
|
3433
|
-
/**
|
3434
|
-
The text to convert to speech.
|
3435
|
-
*/
|
3436
|
-
text: string;
|
3437
|
-
/**
|
3438
|
-
The voice to use for speech generation.
|
3439
|
-
*/
|
3440
|
-
voice?: string;
|
3441
|
-
/**
|
3442
|
-
* The desired output format for the audio e.g. "mp3", "wav", etc.
|
3443
|
-
*/
|
3444
|
-
outputFormat?: 'mp3' | 'wav' | (string & {});
|
3445
|
-
/**
|
3446
|
-
Instructions for the speech generation e.g. "Speak in a slow and steady tone".
|
3447
|
-
*/
|
3448
|
-
instructions?: string;
|
3449
|
-
/**
|
3450
|
-
The speed of the speech generation.
|
3451
|
-
*/
|
3452
|
-
speed?: number;
|
3453
|
-
/**
|
3454
|
-
The language for speech generation. This should be an ISO 639-1 language code (e.g. "en", "es", "fr")
|
3455
|
-
or "auto" for automatic language detection. Provider support varies.
|
3456
|
-
*/
|
3457
|
-
language?: string;
|
3458
|
-
/**
|
3459
|
-
Additional provider-specific options that are passed through to the provider
|
3460
|
-
as body parameters.
|
3461
|
-
|
3462
|
-
The outer record is keyed by the provider name, and the inner
|
3463
|
-
record is keyed by the provider-specific metadata key.
|
3464
|
-
```ts
|
3465
|
-
{
|
3466
|
-
"openai": {}
|
3467
|
-
}
|
3468
|
-
```
|
3469
|
-
*/
|
3470
|
-
providerOptions?: ProviderOptions;
|
3471
|
-
/**
|
3472
|
-
Maximum number of retries per speech model call. Set to 0 to disable retries.
|
3473
|
-
|
3474
|
-
@default 2
|
3475
|
-
*/
|
3476
|
-
maxRetries?: number;
|
3477
|
-
/**
|
3478
|
-
Abort signal.
|
3479
|
-
*/
|
3480
|
-
abortSignal?: AbortSignal;
|
3481
|
-
/**
|
3482
|
-
Additional headers to include in the request.
|
3483
|
-
Only applicable for HTTP-based providers.
|
3484
|
-
*/
|
3485
|
-
headers?: Record<string, string>;
|
3486
|
-
}): Promise<SpeechResult>;
|
3487
|
-
|
3488
|
-
/**
|
3489
|
-
* Applies default settings for a language model.
|
3490
|
-
*/
|
3491
|
-
declare function defaultSettingsMiddleware({ settings, }: {
|
3492
|
-
settings: Partial<{
|
3493
|
-
maxOutputTokens?: LanguageModelV2CallOptions['maxOutputTokens'];
|
3494
|
-
temperature?: LanguageModelV2CallOptions['temperature'];
|
3495
|
-
stopSequences?: LanguageModelV2CallOptions['stopSequences'];
|
3496
|
-
topP?: LanguageModelV2CallOptions['topP'];
|
3497
|
-
topK?: LanguageModelV2CallOptions['topK'];
|
3498
|
-
presencePenalty?: LanguageModelV2CallOptions['presencePenalty'];
|
3499
|
-
frequencyPenalty?: LanguageModelV2CallOptions['frequencyPenalty'];
|
3500
|
-
responseFormat?: LanguageModelV2CallOptions['responseFormat'];
|
3501
|
-
seed?: LanguageModelV2CallOptions['seed'];
|
3502
|
-
tools?: LanguageModelV2CallOptions['tools'];
|
3503
|
-
toolChoice?: LanguageModelV2CallOptions['toolChoice'];
|
3504
|
-
headers?: LanguageModelV2CallOptions['headers'];
|
3505
|
-
providerOptions?: LanguageModelV2CallOptions['providerOptions'];
|
3506
|
-
}>;
|
3507
|
-
}): LanguageModelV2Middleware;
|
3508
|
-
|
3509
|
-
/**
|
3510
|
-
* Extract an XML-tagged reasoning section from the generated text and exposes it
|
3511
|
-
* as a `reasoning` property on the result.
|
3512
|
-
*
|
3513
|
-
* @param tagName - The name of the XML tag to extract reasoning from.
|
3514
|
-
* @param separator - The separator to use between reasoning and text sections.
|
3515
|
-
* @param startWithReasoning - Whether to start with reasoning tokens.
|
3516
|
-
*/
|
3517
|
-
declare function extractReasoningMiddleware({ tagName, separator, startWithReasoning, }: {
|
3518
|
-
tagName: string;
|
3519
|
-
separator?: string;
|
3520
|
-
startWithReasoning?: boolean;
|
3521
|
-
}): LanguageModelV2Middleware;
|
3522
|
-
|
3523
|
-
/**
|
3524
|
-
* Simulates streaming chunks with the response from a generate call.
|
3525
|
-
*/
|
3526
|
-
declare function simulateStreamingMiddleware(): LanguageModelV2Middleware;
|
3527
|
-
|
3528
|
-
/**
|
3529
|
-
* Wraps a LanguageModelV2 instance with middleware functionality.
|
3530
|
-
* This function allows you to apply middleware to transform parameters,
|
3531
|
-
* wrap generate operations, and wrap stream operations of a language model.
|
3532
|
-
*
|
3533
|
-
* @param options - Configuration options for wrapping the language model.
|
3534
|
-
* @param options.model - The original LanguageModelV2 instance to be wrapped.
|
3535
|
-
* @param options.middleware - The middleware to be applied to the language model. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.
|
3536
|
-
* @param options.modelId - Optional custom model ID to override the original model's ID.
|
3537
|
-
* @param options.providerId - Optional custom provider ID to override the original model's provider.
|
3538
|
-
* @returns A new LanguageModelV2 instance with middleware applied.
|
3539
|
-
*/
|
3540
|
-
declare const wrapLanguageModel: ({ model, middleware: middlewareArg, modelId, providerId, }: {
|
3541
|
-
model: LanguageModelV2;
|
3542
|
-
middleware: LanguageModelV2Middleware | LanguageModelV2Middleware[];
|
3543
|
-
modelId?: string;
|
3544
|
-
providerId?: string;
|
3545
|
-
}) => LanguageModelV2;
|
3546
|
-
|
3547
|
-
/**
|
3548
|
-
* Creates a custom provider with specified language models, text embedding models, image models, transcription models, speech models, and an optional fallback provider.
|
3549
|
-
*
|
3550
|
-
* @param {Object} options - The options for creating the custom provider.
|
3551
|
-
* @param {Record<string, LanguageModel>} [options.languageModels] - A record of language models, where keys are model IDs and values are LanguageModel instances.
|
3552
|
-
* @param {Record<string, EmbeddingModel<string>>} [options.textEmbeddingModels] - A record of text embedding models, where keys are model IDs and values are EmbeddingModel<string> instances.
|
3553
|
-
* @param {Record<string, ImageModel>} [options.imageModels] - A record of image models, where keys are model IDs and values are ImageModel instances.
|
3554
|
-
* @param {Record<string, TranscriptionModel>} [options.transcriptionModels] - A record of transcription models, where keys are model IDs and values are TranscriptionModel instances.
|
3555
|
-
* @param {Record<string, SpeechModel>} [options.speechModels] - A record of speech models, where keys are model IDs and values are SpeechModel instances.
|
3556
|
-
* @param {Provider} [options.fallbackProvider] - An optional fallback provider to use when a requested model is not found in the custom provider.
|
3557
|
-
* @returns {Provider} A Provider object with languageModel, textEmbeddingModel, imageModel, transcriptionModel, and speechModel methods.
|
3558
|
-
*
|
3559
|
-
* @throws {NoSuchModelError} Throws when a requested model is not found and no fallback provider is available.
|
3560
|
-
*/
|
3561
|
-
declare function customProvider<LANGUAGE_MODELS extends Record<string, LanguageModelV2>, EMBEDDING_MODELS extends Record<string, EmbeddingModelV2<string>>, IMAGE_MODELS extends Record<string, ImageModelV2>, TRANSCRIPTION_MODELS extends Record<string, TranscriptionModelV2>, SPEECH_MODELS extends Record<string, SpeechModelV2>>({ languageModels, textEmbeddingModels, imageModels, transcriptionModels, speechModels, fallbackProvider, }: {
|
3562
|
-
languageModels?: LANGUAGE_MODELS;
|
3563
|
-
textEmbeddingModels?: EMBEDDING_MODELS;
|
3564
|
-
imageModels?: IMAGE_MODELS;
|
3565
|
-
transcriptionModels?: TRANSCRIPTION_MODELS;
|
3566
|
-
speechModels?: SPEECH_MODELS;
|
3567
|
-
fallbackProvider?: ProviderV2;
|
3568
|
-
}): ProviderV2 & {
|
3569
|
-
languageModel(modelId: ExtractModelId<LANGUAGE_MODELS>): LanguageModelV2;
|
3570
|
-
textEmbeddingModel(modelId: ExtractModelId<EMBEDDING_MODELS>): EmbeddingModelV2<string>;
|
3571
|
-
imageModel(modelId: ExtractModelId<IMAGE_MODELS>): ImageModelV2;
|
3572
|
-
transcriptionModel(modelId: ExtractModelId<TRANSCRIPTION_MODELS>): TranscriptionModelV2;
|
3573
|
-
speechModel(modelId: ExtractModelId<SPEECH_MODELS>): SpeechModelV2;
|
3567
|
+
declare const UI_MESSAGE_STREAM_HEADERS: {
|
3568
|
+
'content-type': string;
|
3569
|
+
'cache-control': string;
|
3570
|
+
connection: string;
|
3571
|
+
'x-vercel-ai-ui-message-stream': string;
|
3572
|
+
'x-accel-buffering': string;
|
3574
3573
|
};
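`UI_MESSAGE_STREAM_HEADERS` and `JsonToSseTransformStream` make it possible to hand-roll the SSE response that `createUIMessageStreamResponse` normally builds for you. A rough sketch of that wiring; the exact serialization used internally by the SDK is an assumption here:

```ts
import {
  JsonToSseTransformStream,
  UI_MESSAGE_STREAM_HEADERS,
  type UIMessageChunk,
} from 'ai';

function toSseResponse(chunks: ReadableStream<UIMessageChunk>): Response {
  return new Response(
    chunks
      .pipeThrough(new JsonToSseTransformStream()) // JSON chunks -> SSE "data:" lines
      .pipeThrough(new TextEncoderStream()),       // string -> bytes
    { headers: UI_MESSAGE_STREAM_HEADERS },
  );
}
```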
|
3575
|
-
/**
|
3576
|
-
* @deprecated Use `customProvider` instead.
|
3577
|
-
*/
|
3578
|
-
declare const experimental_customProvider: typeof customProvider;
|
3579
|
-
type ExtractModelId<MODELS extends Record<string, unknown>> = Extract<keyof MODELS, string>;
|
3580
|
-
|
3581
|
-
declare const symbol: unique symbol;
|
3582
|
-
declare class NoSuchProviderError extends NoSuchModelError {
|
3583
|
-
private readonly [symbol];
|
3584
|
-
readonly providerId: string;
|
3585
|
-
readonly availableProviders: string[];
|
3586
|
-
constructor({ modelId, modelType, providerId, availableProviders, message, }: {
|
3587
|
-
modelId: string;
|
3588
|
-
modelType: 'languageModel' | 'textEmbeddingModel' | 'imageModel' | 'transcriptionModel' | 'speechModel';
|
3589
|
-
providerId: string;
|
3590
|
-
availableProviders: string[];
|
3591
|
-
message?: string;
|
3592
|
-
});
|
3593
|
-
static isInstance(error: unknown): error is NoSuchProviderError;
|
3594
|
-
}
|
3595
|
-
|
3596
|
-
type ExtractLiteralUnion<T> = T extends string ? string extends T ? never : T : never;
|
3597
|
-
interface ProviderRegistryProvider<PROVIDERS extends Record<string, ProviderV2> = Record<string, ProviderV2>, SEPARATOR extends string = ':'> {
|
3598
|
-
languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['languageModel']>>[0]>}` : never): LanguageModelV2;
|
3599
|
-
languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): LanguageModelV2;
|
3600
|
-
textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['textEmbeddingModel']>>[0]>}` : never): EmbeddingModelV2<string>;
|
3601
|
-
textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): EmbeddingModelV2<string>;
|
3602
|
-
imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['imageModel']>>[0]>}` : never): ImageModelV2;
|
3603
|
-
imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): ImageModelV2;
|
3604
|
-
transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['transcriptionModel']>>[0]>}` : never): TranscriptionModelV2;
|
3605
|
-
transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): TranscriptionModelV2;
|
3606
|
-
speechModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['speechModel']>>[0]>}` : never): SpeechModelV2;
|
3607
|
-
speechModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): SpeechModelV2;
|
3608
|
-
}
|
3609
|
-
/**
|
3610
|
-
* Creates a registry for the given providers.
|
3611
|
-
*/
|
3612
|
-
declare function createProviderRegistry<PROVIDERS extends Record<string, ProviderV2>, SEPARATOR extends string = ':'>(providers: PROVIDERS, { separator, }?: {
|
3613
|
-
separator?: SEPARATOR;
|
3614
|
-
}): ProviderRegistryProvider<PROVIDERS, SEPARATOR>;
|
3615
|
-
/**
|
3616
|
-
* @deprecated Use `createProviderRegistry` instead.
|
3617
|
-
*/
|
3618
|
-
declare const experimental_createProviderRegistry: typeof createProviderRegistry;
|
3619
3574
|
|
3620
|
-
|
3621
|
-
|
3622
|
-
|
3623
|
-
|
3624
|
-
|
3625
|
-
|
3626
|
-
|
3627
|
-
|
3628
|
-
|
3629
|
-
|
3630
|
-
|
3631
|
-
|
3632
|
-
|
3633
|
-
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3634
|
-
}, z.core.$loose>;
|
3635
|
-
}, z.core.$strict>;
|
3636
|
-
type JSONRPCResponse = z.infer<typeof JSONRPCResponseSchema>;
|
3637
|
-
declare const JSONRPCErrorSchema: z.ZodObject<{
|
3638
|
-
jsonrpc: z.ZodLiteral<"2.0">;
|
3639
|
-
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
3640
|
-
error: z.ZodObject<{
|
3641
|
-
code: z.ZodNumber;
|
3642
|
-
message: z.ZodString;
|
3643
|
-
data: z.ZodOptional<z.ZodUnknown>;
|
3644
|
-
}, z.core.$strip>;
|
3645
|
-
}, z.core.$strict>;
|
3646
|
-
type JSONRPCError = z.infer<typeof JSONRPCErrorSchema>;
|
3647
|
-
declare const JSONRPCNotificationSchema: z.ZodObject<{
|
3648
|
-
jsonrpc: z.ZodLiteral<"2.0">;
|
3649
|
-
method: z.ZodString;
|
3650
|
-
params: z.ZodOptional<z.ZodObject<{
|
3651
|
-
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3652
|
-
}, z.core.$loose>>;
|
3653
|
-
}, z.core.$strict>;
|
3654
|
-
type JSONRPCNotification = z.infer<typeof JSONRPCNotificationSchema>;
|
3655
|
-
declare const JSONRPCMessageSchema: z.ZodUnion<readonly [z.ZodObject<{
|
3656
|
-
jsonrpc: z.ZodLiteral<"2.0">;
|
3657
|
-
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
3658
|
-
method: z.ZodString;
|
3659
|
-
params: z.ZodOptional<z.ZodObject<{
|
3660
|
-
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3661
|
-
}, z.core.$loose>>;
|
3662
|
-
}, z.core.$strict>, z.ZodObject<{
|
3663
|
-
jsonrpc: z.ZodLiteral<"2.0">;
|
3664
|
-
method: z.ZodString;
|
3665
|
-
params: z.ZodOptional<z.ZodObject<{
|
3666
|
-
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3667
|
-
}, z.core.$loose>>;
|
3668
|
-
}, z.core.$strict>, z.ZodObject<{
|
3669
|
-
jsonrpc: z.ZodLiteral<"2.0">;
|
3670
|
-
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
3671
|
-
result: z.ZodObject<{
|
3672
|
-
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3673
|
-
}, z.core.$loose>;
|
3674
|
-
}, z.core.$strict>, z.ZodObject<{
|
3675
|
-
jsonrpc: z.ZodLiteral<"2.0">;
|
3676
|
-
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
3677
|
-
error: z.ZodObject<{
|
3678
|
-
code: z.ZodNumber;
|
3679
|
-
message: z.ZodString;
|
3680
|
-
data: z.ZodOptional<z.ZodUnknown>;
|
3681
|
-
}, z.core.$strip>;
|
3682
|
-
}, z.core.$strict>]>;
|
3683
|
-
type JSONRPCMessage = z.infer<typeof JSONRPCMessageSchema>;
|
3575
|
+
interface ChatTransport<UI_MESSAGE extends UIMessage> {
|
3576
|
+
sendMessages: (options: {
|
3577
|
+
chatId: string;
|
3578
|
+
messages: UI_MESSAGE[];
|
3579
|
+
abortSignal: AbortSignal | undefined;
|
3580
|
+
} & {
|
3581
|
+
trigger: 'submit-user-message' | 'submit-tool-result' | 'regenerate-assistant-message';
|
3582
|
+
messageId: string | undefined;
|
3583
|
+
} & ChatRequestOptions) => Promise<ReadableStream<UIMessageChunk>>;
|
3584
|
+
reconnectToStream: (options: {
|
3585
|
+
chatId: string;
|
3586
|
+
} & ChatRequestOptions) => Promise<ReadableStream<UIMessageChunk> | null>;
|
3587
|
+
}
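A custom `ChatTransport` only needs `sendMessages` and `reconnectToStream`. A minimal sketch, where `fetchChunksSomehow` is a hypothetical backend call (not part of the SDK):

```ts
import type { ChatTransport, UIMessage, UIMessageChunk } from 'ai';

// Hypothetical backend call that produces UI message chunks.
declare function fetchChunksSomehow(
  chatId: string,
  messages: UIMessage[],
  signal?: AbortSignal,
): Promise<ReadableStream<UIMessageChunk>>;

class WebSocketLikeTransport<UI_MESSAGE extends UIMessage>
  implements ChatTransport<UI_MESSAGE>
{
  sendMessages = async (
    options: Parameters<ChatTransport<UI_MESSAGE>['sendMessages']>[0],
  ): Promise<ReadableStream<UIMessageChunk>> =>
    fetchChunksSomehow(options.chatId, options.messages, options.abortSignal);

  reconnectToStream = async (
    _options: Parameters<ChatTransport<UI_MESSAGE>['reconnectToStream']>[0],
  ): Promise<ReadableStream<UIMessageChunk> | null> => null; // resuming not supported here
}
```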
|
3684
3588
|
|
3685
|
-
|
3686
|
-
|
3687
|
-
|
3688
|
-
|
3689
|
-
|
3589
|
+
type CreateUIMessage<UI_MESSAGE extends UIMessage> = Omit<UI_MESSAGE, 'id' | 'role'> & {
|
3590
|
+
id?: UI_MESSAGE['id'];
|
3591
|
+
role?: UI_MESSAGE['role'];
|
3592
|
+
};
|
3593
|
+
type UIDataPartSchemas = Record<string, Validator<any> | StandardSchemaV1<any>>;
|
3594
|
+
type UIDataTypesToSchemas<T extends UIDataTypes> = {
|
3595
|
+
[K in keyof T]: Validator<T[K]> | StandardSchemaV1<T[K]>;
|
3596
|
+
};
|
3597
|
+
type InferUIDataParts<T extends UIDataPartSchemas> = {
|
3598
|
+
[K in keyof T]: T[K] extends Validator<infer U> ? U : T[K] extends StandardSchemaV1<infer U> ? U : unknown;
|
3599
|
+
};
|
3600
|
+
type ChatRequestOptions = {
|
3690
3601
|
/**
|
3691
|
-
|
3602
|
+
Additional headers that should be passed to the API endpoint.
|
3692
3603
|
*/
|
3693
|
-
|
3604
|
+
headers?: Record<string, string> | Headers;
|
3694
3605
|
/**
|
3695
|
-
|
3696
|
-
* @param message The JSON-RPC message to send
|
3606
|
+
Additional body JSON properties that should be sent to the API endpoint.
|
3697
3607
|
*/
|
3698
|
-
|
3608
|
+
body?: object;
|
3609
|
+
metadata?: unknown;
|
3610
|
+
};
|
3611
|
+
type ChatStatus = 'submitted' | 'streaming' | 'ready' | 'error';
|
3612
|
+
interface ChatState<UI_MESSAGE extends UIMessage> {
|
3613
|
+
status: ChatStatus;
|
3614
|
+
error: Error | undefined;
|
3615
|
+
messages: UI_MESSAGE[];
|
3616
|
+
pushMessage: (message: UI_MESSAGE) => void;
|
3617
|
+
popMessage: () => void;
|
3618
|
+
replaceMessage: (index: number, message: UI_MESSAGE) => void;
|
3619
|
+
snapshot: <T>(thing: T) => T;
|
3620
|
+
}
|
3621
|
+
type ChatOnErrorCallback = (error: Error) => void;
|
3622
|
+
type ChatOnToolCallCallback = ({ toolCall, }: {
|
3623
|
+
toolCall: ToolCall<string, unknown>;
|
3624
|
+
}) => void | Promise<unknown> | unknown;
|
3625
|
+
type ChatOnDataCallback<UI_MESSAGE extends UIMessage> = (dataPart: DataUIPart<InferUIMessageData<UI_MESSAGE>>) => void;
|
3626
|
+
type ChatOnFinishCallback<UI_MESSAGE extends UIMessage> = (options: {
|
3627
|
+
message: UI_MESSAGE;
|
3628
|
+
}) => void;
|
3629
|
+
interface ChatInit<UI_MESSAGE extends UIMessage> {
|
3699
3630
|
/**
|
3700
|
-
*
|
3631
|
+
* A unique identifier for the chat. If not provided, a random one will be
|
3632
|
+
* generated.
|
3701
3633
|
*/
|
3702
|
-
|
3634
|
+
id?: string;
|
3635
|
+
messageMetadataSchema?: Validator<InferUIMessageMetadata<UI_MESSAGE>> | StandardSchemaV1<InferUIMessageMetadata<UI_MESSAGE>>;
|
3636
|
+
dataPartSchemas?: UIDataTypesToSchemas<InferUIMessageData<UI_MESSAGE>>;
|
3637
|
+
messages?: UI_MESSAGE[];
|
3703
3638
|
/**
|
3704
|
-
*
|
3639
|
+
* A function that generates ids for messages and the chat.
|
3640
|
+
* If not provided, the default AI SDK `generateId` is used.
|
3705
3641
|
*/
|
3706
|
-
|
3642
|
+
generateId?: IdGenerator;
|
3643
|
+
transport?: ChatTransport<UI_MESSAGE>;
|
3644
|
+
maxSteps?: number;
|
3707
3645
|
/**
|
3708
|
-
*
|
3646
|
+
* Callback function to be called when an error is encountered.
|
3709
3647
|
*/
|
3710
|
-
|
3648
|
+
onError?: ChatOnErrorCallback;
|
3711
3649
|
/**
|
3712
|
-
|
3650
|
+
Optional callback function that is invoked when a tool call is received.
|
3651
|
+
Intended for automatic client-side tool execution.
|
3652
|
+
|
3653
|
+
You can optionally return a result for the tool call,
|
3654
|
+
either synchronously or asynchronously.
|
3655
|
+
*/
|
3656
|
+
onToolCall?: ChatOnToolCallCallback;
|
3657
|
+
/**
|
3658
|
+
* Optional callback function that is called when the assistant message is finished streaming.
|
3659
|
+
*
|
3660
|
+
* @param message The message that was streamed.
|
3713
3661
|
*/
|
3714
|
-
|
3662
|
+
onFinish?: ChatOnFinishCallback<UI_MESSAGE>;
|
3663
|
+
/**
|
3664
|
+
* Optional callback function that is called when a data part is received.
|
3665
|
+
*
|
3666
|
+
* @param data The data part that was received.
|
3667
|
+
*/
|
3668
|
+
onData?: ChatOnDataCallback<UI_MESSAGE>;
|
3715
3669
|
}
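A sketch of a `ChatInit` configuration as a framework `useChat`/`Chat` wrapper might pass it down; the endpoint and the `getLocation` tool name are illustrative assumptions:

```ts
import { DefaultChatTransport, type ChatInit, type UIMessage } from 'ai';

const chatOptions: ChatInit<UIMessage> = {
  id: 'chat-123',
  transport: new DefaultChatTransport({ api: '/api/chat' }),
  maxSteps: 5,
  onError: error => console.error('chat error:', error),
  onToolCall: async ({ toolCall }) => {
    // Return a result for client-side tools (tool name is an assumption).
    if (toolCall.toolName === 'getLocation') return { city: 'Berlin' };
  },
  onFinish: ({ message }) => console.log('assistant message finished:', message.id),
  onData: dataPart => console.log('data part received:', dataPart.type),
};
```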
|
3716
|
-
|
3717
|
-
|
3670
|
+
declare abstract class AbstractChat<UI_MESSAGE extends UIMessage> {
|
3671
|
+
readonly id: string;
|
3672
|
+
readonly generateId: IdGenerator;
|
3673
|
+
protected state: ChatState<UI_MESSAGE>;
|
3674
|
+
private messageMetadataSchema;
|
3675
|
+
private dataPartSchemas;
|
3676
|
+
private readonly transport;
|
3677
|
+
private maxSteps;
|
3678
|
+
private onError?;
|
3679
|
+
private onToolCall?;
|
3680
|
+
private onFinish?;
|
3681
|
+
private onData?;
|
3682
|
+
private activeResponse;
|
3683
|
+
private jobExecutor;
|
3684
|
+
constructor({ generateId, id, transport, maxSteps, messageMetadataSchema, dataPartSchemas, state, onError, onToolCall, onFinish, onData, }: Omit<ChatInit<UI_MESSAGE>, 'messages'> & {
|
3685
|
+
state: ChatState<UI_MESSAGE>;
|
3686
|
+
});
|
3718
3687
|
/**
|
3719
|
-
*
|
3688
|
+
* Hook status:
|
3689
|
+
*
|
3690
|
+
* - `submitted`: The message has been sent to the API and we're awaiting the start of the response stream.
|
3691
|
+
* - `streaming`: The response is actively streaming in from the API, receiving chunks of data.
|
3692
|
+
* - `ready`: The full response has been received and processed; a new user message can be submitted.
|
3693
|
+
* - `error`: An error occurred during the API request, preventing successful completion.
|
3720
3694
|
*/
|
3721
|
-
|
3695
|
+
get status(): ChatStatus;
|
3696
|
+
protected setStatus({ status, error, }: {
|
3697
|
+
status: ChatStatus;
|
3698
|
+
error?: Error;
|
3699
|
+
}): void;
|
3700
|
+
get error(): Error | undefined;
|
3701
|
+
get messages(): UI_MESSAGE[];
|
3702
|
+
get lastMessage(): UI_MESSAGE | undefined;
|
3703
|
+
set messages(messages: UI_MESSAGE[]);
|
3722
3704
|
/**
|
3723
|
-
*
|
3705
|
+
* Appends or replaces a user message in the chat list. This triggers the API call to fetch
|
3706
|
+
* the assistant's response.
|
3707
|
+
*
|
3708
|
+
* If a messageId is provided, the message will be replaced.
|
3724
3709
|
*/
|
3725
|
-
|
3726
|
-
|
3727
|
-
|
3728
|
-
|
3729
|
-
|
3730
|
-
|
3731
|
-
|
3732
|
-
|
3733
|
-
|
3734
|
-
|
3735
|
-
|
3736
|
-
|
3737
|
-
|
3738
|
-
|
3739
|
-
|
3740
|
-
|
3741
|
-
content: z.ZodArray<z.ZodUnion<readonly [z.ZodObject<{
|
3742
|
-
type: z.ZodLiteral<"text">;
|
3743
|
-
text: z.ZodString;
|
3744
|
-
}, z.core.$loose>, z.ZodObject<{
|
3745
|
-
type: z.ZodLiteral<"image">;
|
3746
|
-
data: z.ZodString;
|
3747
|
-
mimeType: z.ZodString;
|
3748
|
-
}, z.core.$loose>, z.ZodObject<{
|
3749
|
-
type: z.ZodLiteral<"resource">;
|
3750
|
-
resource: z.ZodUnion<readonly [z.ZodObject<{
|
3751
|
-
uri: z.ZodString;
|
3752
|
-
mimeType: z.ZodOptional<z.ZodString>;
|
3753
|
-
text: z.ZodString;
|
3754
|
-
}, z.core.$loose>, z.ZodObject<{
|
3755
|
-
uri: z.ZodString;
|
3756
|
-
mimeType: z.ZodOptional<z.ZodString>;
|
3757
|
-
blob: z.ZodString;
|
3758
|
-
}, z.core.$loose>]>;
|
3759
|
-
}, z.core.$loose>]>>;
|
3760
|
-
isError: z.ZodOptional<z.ZodDefault<z.ZodBoolean>>;
|
3761
|
-
}, z.core.$loose>, z.ZodObject<{
|
3762
|
-
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
3763
|
-
toolResult: z.ZodUnknown;
|
3764
|
-
}, z.core.$loose>]>;
|
3765
|
-
type CallToolResult = z.infer<typeof CallToolResultSchema>;
|
3766
|
-
|
3767
|
-
interface MCPClientConfig {
|
3768
|
-
/** Transport configuration for connecting to the MCP server */
|
3769
|
-
transport: MCPTransportConfig | MCPTransport;
|
3770
|
-
/** Optional callback for uncaught errors */
|
3771
|
-
onUncaughtError?: (error: unknown) => void;
|
3772
|
-
/** Optional client name, defaults to 'ai-sdk-mcp-client' */
|
3773
|
-
name?: string;
|
3774
|
-
}
|
3775
|
-
declare function createMCPClient(config: MCPClientConfig): Promise<MCPClient>;
|
3776
|
-
/**
|
3777
|
-
* A lightweight MCP Client implementation
|
3778
|
-
*
|
3779
|
-
* The primary purpose of this client is tool conversion between MCP<>AI SDK
|
3780
|
-
* but can later be extended to support other MCP features
|
3781
|
-
*
|
3782
|
-
* Tool parameters are automatically inferred from the server's JSON schema
|
3783
|
-
* if not explicitly provided in the tools configuration
|
3784
|
-
*
|
3785
|
-
* This client is meant to be used to communicate with a single server. To communicate and fetch tools across multiple servers, it's recommended to create a new client instance per server.
|
3786
|
-
*
|
3787
|
-
* Not supported:
|
3788
|
-
* - Client options (e.g. sampling, roots) as they are not needed for tool conversion
|
3789
|
-
* - Accepting notifications
|
3790
|
-
* - Session management (when passing a sessionId to an instance of the Streamable HTTP transport)
|
3791
|
-
* - Resumable SSE streams
|
3792
|
-
*/
|
3793
|
-
declare class MCPClient {
|
3794
|
-
private transport;
|
3795
|
-
private onUncaughtError?;
|
3796
|
-
private clientInfo;
|
3797
|
-
private requestMessageId;
|
3798
|
-
private responseHandlers;
|
3799
|
-
private serverCapabilities;
|
3800
|
-
private isClosed;
|
3801
|
-
constructor({ transport: transportConfig, name, onUncaughtError, }: MCPClientConfig);
|
3802
|
-
init(): Promise<this>;
|
3803
|
-
close(): Promise<void>;
|
3804
|
-
private assertCapability;
|
3805
|
-
private request;
|
3806
|
-
private listTools;
|
3807
|
-
private callTool;
|
3808
|
-
private notification;
|
3710
|
+
sendMessage: (message: (CreateUIMessage<UI_MESSAGE> & {
|
3711
|
+
text?: never;
|
3712
|
+
files?: never;
|
3713
|
+
messageId?: string;
|
3714
|
+
}) | {
|
3715
|
+
text: string;
|
3716
|
+
files?: FileList | FileUIPart[];
|
3717
|
+
metadata?: InferUIMessageMetadata<UI_MESSAGE>;
|
3718
|
+
parts?: never;
|
3719
|
+
messageId?: string;
|
3720
|
+
} | {
|
3721
|
+
files: FileList | FileUIPart[];
|
3722
|
+
metadata?: InferUIMessageMetadata<UI_MESSAGE>;
|
3723
|
+
parts?: never;
|
3724
|
+
messageId?: string;
|
3725
|
+
}, options?: ChatRequestOptions) => Promise<void>;
|
3809
3726
|
/**
|
3810
|
-
*
|
3811
|
-
*
|
3727
|
+
* Regenerate the assistant message with the provided message id.
|
3728
|
+
* If no message id is provided, the last assistant message will be regenerated.
|
3812
3729
|
*/
|
3813
|
-
|
3814
|
-
|
3815
|
-
})
|
3816
|
-
|
3817
|
-
|
3818
|
-
|
3730
|
+
regenerate: ({ messageId, ...options }?: {
|
3731
|
+
messageId?: string;
|
3732
|
+
} & ChatRequestOptions) => Promise<void>;
|
3733
|
+
/**
|
3734
|
+
* Attempt to resume an ongoing streaming response.
|
3735
|
+
*/
|
3736
|
+
resumeStream: (options?: ChatRequestOptions) => Promise<void>;
|
3737
|
+
addToolResult: ({ toolCallId, output, }: {
|
3738
|
+
toolCallId: string;
|
3739
|
+
output: unknown;
|
3740
|
+
}) => Promise<void>;
|
3741
|
+
/**
|
3742
|
+
* Abort the current request immediately, keeping the generated tokens if any.
|
3743
|
+
*/
|
3744
|
+
stop: () => Promise<void>;
|
3745
|
+
private makeRequest;
|
3819
3746
|
}
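How the `AbstractChat` surface above is typically driven at runtime; `chat` would be an instance of a concrete subclass shipped by a UI framework package, so this is only a usage sketch:

```ts
import type { AbstractChat, UIMessage } from 'ai';

async function demo(chat: AbstractChat<UIMessage>) {
  // Append a user message (text variant) and trigger the assistant response.
  await chat.sendMessage(
    { text: 'What is the weather in Berlin?' },
    { body: { sessionId: '123' } },
  );

  // Provide the output for a pending client-side tool call.
  await chat.addToolResult({ toolCallId: 'call-1', output: { temperature: 21 } });

  // Regenerate the last assistant message (or pass { messageId } to target one).
  await chat.regenerate();

  // Abort the in-flight request, keeping any tokens already received.
  await chat.stop();
}
```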
|
3820
3747
|
|
3748
|
+
declare function convertFileListToFileUIParts(files: FileList | undefined): Promise<Array<FileUIPart>>;
|
3749
|
+
|
3821
3750
|
/**
|
3822
|
-
|
3823
|
-
|
3751
|
+
Converts an array of messages from useChat into an array of CoreMessages that can be used
|
3752
|
+
with the AI core functions (e.g. `streamText`).
|
3753
|
+
|
3754
|
+
@param messages - The messages to convert.
|
3755
|
+
@param options.tools - The tools to use.
|
3756
|
+
@param options.ignoreIncompleteToolCalls - Whether to ignore incomplete tool calls. Default is `false`.
|
3824
3757
|
*/
|
3825
|
-
|
3758
|
+
declare function convertToModelMessages(messages: Array<Omit<UIMessage, 'id'>>, options?: {
|
3759
|
+
tools?: ToolSet;
|
3760
|
+
ignoreIncompleteToolCalls?: boolean;
|
3761
|
+
}): ModelMessage[];
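The usual place for `convertToModelMessages` is a chat route handler that bridges UI messages to `streamText`. A sketch; the model id, the endpoint shape, and the `toUIMessageStreamResponse()` helper on the result are assumptions outside this diff:

```ts
import { convertToModelMessages, streamText, type UIMessage } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

export async function POST(req: Request) {
  const { messages }: { messages: UIMessage[] } = await req.json();

  const result = streamText({
    model: openai('gpt-4o'), // assumed model id
    messages: convertToModelMessages(messages, { ignoreIncompleteToolCalls: true }),
  });

  return result.toUIMessageStreamResponse(); // assumed result helper
}
```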
|
3762
|
+
/**
|
3763
|
+
@deprecated Use `convertToModelMessages` instead.
|
3764
|
+
*/
|
3765
|
+
declare const convertToCoreMessages: typeof convertToModelMessages;
|
3766
|
+
|
3767
|
+
type PrepareSendMessagesRequest<UI_MESSAGE extends UIMessage> = (options: {
|
3768
|
+
id: string;
|
3769
|
+
messages: UI_MESSAGE[];
|
3770
|
+
requestMetadata: unknown;
|
3771
|
+
body: Record<string, any> | undefined;
|
3772
|
+
credentials: RequestCredentials | undefined;
|
3773
|
+
headers: HeadersInit | undefined;
|
3774
|
+
api: string;
|
3775
|
+
} & {
|
3776
|
+
trigger: 'submit-user-message' | 'submit-tool-result' | 'regenerate-assistant-message';
|
3777
|
+
messageId: string | undefined;
|
3778
|
+
}) => {
|
3779
|
+
body: object;
|
3780
|
+
headers?: HeadersInit;
|
3781
|
+
credentials?: RequestCredentials;
|
3782
|
+
api?: string;
|
3783
|
+
} | PromiseLike<{
|
3784
|
+
body: object;
|
3785
|
+
headers?: HeadersInit;
|
3786
|
+
credentials?: RequestCredentials;
|
3787
|
+
api?: string;
|
3788
|
+
}>;
|
3789
|
+
type PrepareReconnectToStreamRequest = (options: {
|
3790
|
+
id: string;
|
3791
|
+
requestMetadata: unknown;
|
3792
|
+
body: Record<string, any> | undefined;
|
3793
|
+
credentials: RequestCredentials | undefined;
|
3794
|
+
headers: HeadersInit | undefined;
|
3795
|
+
api: string;
|
3796
|
+
}) => {
|
3797
|
+
headers?: HeadersInit;
|
3798
|
+
credentials?: RequestCredentials;
|
3799
|
+
api?: string;
|
3800
|
+
} | PromiseLike<{
|
3801
|
+
headers?: HeadersInit;
|
3802
|
+
credentials?: RequestCredentials;
|
3803
|
+
api?: string;
|
3804
|
+
}>;
|
3805
|
+
/**
|
3806
|
+
* Options for the `HttpChatTransport` class.
|
3807
|
+
*
|
3808
|
+
* @param UI_MESSAGE - The type of message to be used in the chat.
|
3809
|
+
*/
|
3810
|
+
type HttpChatTransportInitOptions<UI_MESSAGE extends UIMessage> = {
|
3826
3811
|
/**
|
3827
|
-
* The
|
3812
|
+
* The API URL to be used for the chat transport.
|
3813
|
+
* Defaults to '/api/chat'.
|
3828
3814
|
*/
|
3829
|
-
|
3815
|
+
api?: string;
|
3830
3816
|
/**
|
3831
|
-
*
|
3832
|
-
*
|
3817
|
+
* The credentials mode to be used for the fetch request.
|
3818
|
+
* Possible values are: 'omit', 'same-origin', 'include'.
|
3819
|
+
* Defaults to 'same-origin'.
|
3833
3820
|
*/
|
3834
|
-
|
3835
|
-
/**
|
3836
|
-
* The text content of this segment.
|
3837
|
-
*/
|
3838
|
-
readonly text: string;
|
3839
|
-
/**
|
3840
|
-
* The start time of this segment in seconds.
|
3841
|
-
*/
|
3842
|
-
readonly startSecond: number;
|
3843
|
-
/**
|
3844
|
-
* The end time of this segment in seconds.
|
3845
|
-
*/
|
3846
|
-
readonly endSecond: number;
|
3847
|
-
}>;
|
3821
|
+
credentials?: Resolvable<RequestCredentials>;
|
3848
3822
|
/**
|
3849
|
-
*
|
3850
|
-
* May be undefined if the language couldn't be detected.
|
3823
|
+
* HTTP headers to be sent with the API request.
|
3851
3824
|
*/
|
3852
|
-
|
3825
|
+
headers?: Resolvable<Record<string, string> | Headers>;
|
3853
3826
|
/**
|
3854
|
-
*
|
3855
|
-
*
|
3827
|
+
* Extra body object to be sent with the API request.
|
3828
|
+
* @example
|
3829
|
+
* Send a `sessionId` to the API along with the messages.
|
3830
|
+
* ```js
|
3831
|
+
* useChat({
|
3832
|
+
* body: {
|
3833
|
+
* sessionId: '123',
|
3834
|
+
* }
|
3835
|
+
* })
|
3836
|
+
* ```
|
3856
3837
|
*/
|
3857
|
-
|
3838
|
+
body?: Resolvable<object>;
|
3858
3839
|
/**
|
3859
|
-
|
3860
|
-
|
3861
|
-
|
3840
|
+
Custom fetch implementation. You can use it as a middleware to intercept requests,
|
3841
|
+
or to provide a custom fetch implementation for e.g. testing.
|
3842
|
+
*/
|
3843
|
+
fetch?: FetchFunction;
|
3862
3844
|
/**
|
3863
|
-
|
3845
|
+
* When a function is provided, it will be used
|
3846
|
+
* to prepare the request body for the chat API. This can be useful for
|
3847
|
+
* customizing the request body based on the messages and data in the chat.
|
3848
|
+
*
|
3849
|
+
* @param id The id of the chat.
|
3850
|
+
* @param messages The current messages in the chat.
|
3851
|
+
* @param requestBody The request body object passed in the chat request.
|
3864
3852
|
*/
|
3865
|
-
|
3853
|
+
prepareSendMessagesRequest?: PrepareSendMessagesRequest<UI_MESSAGE>;
|
3866
3854
|
/**
|
3867
|
-
|
3855
|
+
* When a function is provided, it will be used
|
3856
|
+
* to prepare the request body for the chat API. This can be useful for
|
3857
|
+
* customizing the request body based on the messages and data in the chat.
|
3858
|
+
*
|
3859
|
+
* @param id The id of the chat.
|
3860
|
+
* @param messages The current messages in the chat.
|
3861
|
+
* @param requestBody The request body object passed in the chat request.
|
3868
3862
|
*/
|
3869
|
-
|
3863
|
+
prepareReconnectToStreamRequest?: PrepareReconnectToStreamRequest;
|
3864
|
+
};
|
3865
|
+
declare abstract class HttpChatTransport<UI_MESSAGE extends UIMessage> implements ChatTransport<UI_MESSAGE> {
|
3866
|
+
protected api: string;
|
3867
|
+
protected credentials: HttpChatTransportInitOptions<UI_MESSAGE>['credentials'];
|
3868
|
+
protected headers: HttpChatTransportInitOptions<UI_MESSAGE>['headers'];
|
3869
|
+
protected body: HttpChatTransportInitOptions<UI_MESSAGE>['body'];
|
3870
|
+
protected fetch?: FetchFunction;
|
3871
|
+
protected prepareSendMessagesRequest?: PrepareSendMessagesRequest<UI_MESSAGE>;
|
3872
|
+
protected prepareReconnectToStreamRequest?: PrepareReconnectToStreamRequest;
|
3873
|
+
constructor({ api, credentials, headers, body, fetch, prepareSendMessagesRequest, prepareReconnectToStreamRequest, }: HttpChatTransportInitOptions<UI_MESSAGE>);
|
3874
|
+
sendMessages({ abortSignal, ...options }: Parameters<ChatTransport<UI_MESSAGE>['sendMessages']>[0]): Promise<ReadableStream<UIMessageChunk>>;
|
3875
|
+
reconnectToStream(options: Parameters<ChatTransport<UI_MESSAGE>['reconnectToStream']>[0]): Promise<ReadableStream<UIMessageChunk> | null>;
|
3876
|
+
protected abstract processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
|
3870
3877
|
}
|
3871
3878
|
|
3872
|
-
|
3873
|
-
|
3879
|
+
declare class DefaultChatTransport<UI_MESSAGE extends UIMessage> extends HttpChatTransport<UI_MESSAGE> {
|
3880
|
+
constructor(options?: HttpChatTransportInitOptions<UI_MESSAGE>);
|
3881
|
+
protected processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
|
3882
|
+
}
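A sketch of configuring `DefaultChatTransport`, using `prepareSendMessagesRequest` to send only the chat id and the newest message; `getToken` and the header name are hypothetical:

```ts
import { DefaultChatTransport } from 'ai';

// Hypothetical auth token accessor.
declare function getToken(): string;

const transport = new DefaultChatTransport({
  api: '/api/chat',
  headers: () => ({ Authorization: `Bearer ${getToken()}` }),
  prepareSendMessagesRequest: ({ id, messages, trigger, messageId, body }) => ({
    body: {
      id,
      message: messages.at(-1), // only the newest message; the server keeps the history
      trigger,
      messageId,
      ...body,
    },
  }),
});
```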
|
3874
3883
|
|
3875
|
-
|
3876
|
-
|
3877
|
-
|
3878
|
-
|
3879
|
-
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
|
3880
|
-
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
3881
|
-
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
3884
|
+
declare class TextStreamChatTransport<UI_MESSAGE extends UIMessage> extends HttpChatTransport<UI_MESSAGE> {
|
3885
|
+
constructor(options?: HttpChatTransportInitOptions<UI_MESSAGE>);
|
3886
|
+
protected processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
|
3887
|
+
}
|
3882
3888
|
|
3883
|
-
|
3884
|
-
*/
|
3885
|
-
declare function transcribe({ model, audio, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
|
3889
|
+
type CompletionRequestOptions = {
|
3886
3890
|
/**
|
3887
|
-
|
3891
|
+
An optional object of headers to be passed to the API endpoint.
|
3892
|
+
*/
|
3893
|
+
headers?: Record<string, string> | Headers;
|
3894
|
+
/**
|
3895
|
+
An optional object to be passed to the API endpoint.
|
3888
3896
|
*/
|
3889
|
-
|
3897
|
+
body?: object;
|
3898
|
+
};
|
3899
|
+
type UseCompletionOptions = {
|
3890
3900
|
/**
|
3891
|
-
|
3901
|
+
* The API endpoint that accepts a `{ prompt: string }` object and returns
|
3902
|
+
* a stream of tokens of the AI completion response. Defaults to `/api/completion`.
|
3892
3903
|
*/
|
3893
|
-
|
3904
|
+
api?: string;
|
3894
3905
|
/**
|
3895
|
-
|
3896
|
-
|
3897
|
-
|
3898
|
-
|
3899
|
-
|
3900
|
-
```ts
|
3901
|
-
{
|
3902
|
-
"openai": {
|
3903
|
-
"temperature": 0
|
3904
|
-
}
|
3905
|
-
}
|
3906
|
-
```
|
3907
|
-
*/
|
3908
|
-
providerOptions?: ProviderOptions;
|
3906
|
+
* A unique identifier for the chat. If not provided, a random one will be
|
3907
|
+
* generated. When provided, the `useChat` hook with the same `id` will
|
3908
|
+
* have shared states across components.
|
3909
|
+
*/
|
3910
|
+
id?: string;
|
3909
3911
|
/**
|
3910
|
-
|
3911
|
-
|
3912
|
-
@default 2
|
3912
|
+
* Initial prompt input of the completion.
|
3913
3913
|
*/
|
3914
|
-
|
3914
|
+
initialInput?: string;
|
3915
3915
|
/**
|
3916
|
-
|
3917
|
-
|
3918
|
-
|
3916
|
+
* Initial completion result. Useful to load an existing history.
|
3917
|
+
*/
|
3918
|
+
initialCompletion?: string;
|
3919
3919
|
/**
|
3920
|
-
|
3921
|
-
|
3922
|
-
|
3923
|
-
|
3924
|
-
|
3920
|
+
* Callback function to be called when the completion is finished streaming.
|
3921
|
+
*/
|
3922
|
+
onFinish?: (prompt: string, completion: string) => void;
|
3923
|
+
/**
|
3924
|
+
* Callback function to be called when an error is encountered.
|
3925
|
+
*/
|
3926
|
+
onError?: (error: Error) => void;
|
3927
|
+
/**
|
3928
|
+
* The credentials mode to be used for the fetch request.
|
3929
|
+
* Possible values are: 'omit', 'same-origin', 'include'.
|
3930
|
+
* Defaults to 'same-origin'.
|
3931
|
+
*/
|
3932
|
+
credentials?: RequestCredentials;
|
3933
|
+
/**
|
3934
|
+
* HTTP headers to be sent with the API request.
|
3935
|
+
*/
|
3936
|
+
headers?: Record<string, string> | Headers;
|
3937
|
+
/**
|
3938
|
+
* Extra body object to be sent with the API request.
|
3939
|
+
* @example
|
3940
|
+
* Send a `sessionId` to the API along with the prompt.
|
3941
|
+
* ```js
|
3942
|
+
* useCompletion({
|
3943
|
+
* body: {
|
3944
|
+
* sessionId: '123',
|
3945
|
+
* }
|
3946
|
+
* })
|
3947
|
+
* ```
|
3948
|
+
*/
|
3949
|
+
body?: object;
|
3950
|
+
/**
|
3951
|
+
Streaming protocol that is used. Defaults to `data`.
|
3952
|
+
*/
|
3953
|
+
streamProtocol?: 'data' | 'text';
|
3954
|
+
/**
|
3955
|
+
Custom fetch implementation. You can use it as a middleware to intercept requests,
|
3956
|
+
or to provide a custom fetch implementation for e.g. testing.
|
3957
|
+
*/
|
3958
|
+
fetch?: FetchFunction;
|
3959
|
+
};
|
3925
3960
|
|
3926
3961
|
declare global {
|
3927
3962
|
var AI_SDK_DEFAULT_PROVIDER: ProviderV2 | undefined;
|
3928
3963
|
}
|
3929
3964
|
|
3930
|
-
export { AbstractChat, CallSettings, CallWarning, ChatInit, ChatOnDataCallback, ChatOnErrorCallback, ChatOnFinishCallback, ChatOnToolCallCallback, ChatRequestOptions, ChatState, ChatStatus, ChatTransport, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataUIPart, DeepPartial, DefaultChatTransport, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, ErrorHandler, Agent as Experimental_Agent, AgentSettings as Experimental_AgentSettings, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, HttpChatTransport, HttpChatTransportInitOptions, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, InferUIDataParts, InferUITool, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolInputError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, PrepareReconnectToStreamRequest, PrepareSendMessagesRequest, PrepareStepFunction, PrepareStepResult, Prompt, Provider, ProviderMetadata, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RetryError, SerialJobExecutor, SourceUrlUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextStreamChatTransport, TextStreamPart, TextUIPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolErrorUnion, ToolResultUnion, ToolSet, ToolUIPart, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIDataPartSchemas, UIDataTypes, UIMessage, UIMessageChunk, UIMessagePart, UIMessageStreamOptions, UIMessageStreamWriter, UITool, UITools, UI_MESSAGE_STREAM_HEADERS, UseCompletionOptions, assistantModelMessageSchema, callCompletionApi, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractReasoningMiddleware, generateObject, generateText, getTextFromDataUrl, getToolName, hasToolCall, isDeepEqualData, isToolUIPart, modelMessageSchema, parsePartialJson, pipeTextStreamToResponse, 
pipeUIMessageStreamToResponse, readUIMessageStream, simulateReadableStream, simulateStreamingMiddleware, smoothStream, stepCountIs, streamObject, streamText, systemModelMessageSchema, toolModelMessageSchema, userModelMessageSchema, wrapLanguageModel };
|
3965
|
+
export { AbstractChat, CallSettings, CallWarning, ChatInit, ChatOnDataCallback, ChatOnErrorCallback, ChatOnFinishCallback, ChatOnToolCallCallback, ChatRequestOptions, ChatState, ChatStatus, ChatTransport, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataUIPart, DeepPartial, DefaultChatTransport, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, ErrorHandler, Agent as Experimental_Agent, AgentSettings as Experimental_AgentSettings, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, HttpChatTransport, HttpChatTransportInitOptions, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, InferUIDataParts, InferUITool, InferUITools, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolInputError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, PrepareReconnectToStreamRequest, PrepareSendMessagesRequest, PrepareStepFunction, PrepareStepResult, Prompt, Provider, ProviderMetadata, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RetryError, SerialJobExecutor, SourceUrlUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextStreamChatTransport, TextStreamPart, TextUIPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolErrorUnion, ToolResultUnion, ToolSet, ToolUIPart, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIDataPartSchemas, UIDataTypes, UIMessage, UIMessageChunk, UIMessagePart, UIMessageStreamOptions, UIMessageStreamWriter, UITool, UITools, UI_MESSAGE_STREAM_HEADERS, UseCompletionOptions, assistantModelMessageSchema, callCompletionApi, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractReasoningMiddleware, generateObject, generateText, getTextFromDataUrl, getToolName, hasToolCall, isDeepEqualData, isToolUIPart, modelMessageSchema, parsePartialJson, pipeTextStreamToResponse, 
pipeUIMessageStreamToResponse, readUIMessageStream, simulateReadableStream, simulateStreamingMiddleware, smoothStream, stepCountIs, streamObject, streamText, systemModelMessageSchema, toolModelMessageSchema, userModelMessageSchema, wrapLanguageModel };
|