ai 5.1.0-beta.8 → 6.0.0-beta.100
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +704 -0
- package/README.md +130 -45
- package/dist/index.d.mts +2212 -1704
- package/dist/index.d.ts +2212 -1704
- package/dist/index.js +4435 -3547
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +4385 -3515
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +7 -7
- package/dist/internal/index.d.ts +7 -7
- package/dist/internal/index.js +186 -79
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +186 -79
- package/dist/internal/index.mjs.map +1 -1
- package/dist/test/index.d.mts +69 -53
- package/dist/test/index.d.ts +69 -53
- package/dist/test/index.js +61 -27
- package/dist/test/index.js.map +1 -1
- package/dist/test/index.mjs +55 -22
- package/dist/test/index.mjs.map +1 -1
- package/package.json +12 -15
- package/dist/mcp-stdio/index.d.mts +0 -89
- package/dist/mcp-stdio/index.d.ts +0 -89
- package/dist/mcp-stdio/index.js +0 -349
- package/dist/mcp-stdio/index.js.map +0 -1
- package/dist/mcp-stdio/index.mjs +0 -322
- package/dist/mcp-stdio/index.mjs.map +0 -1
- package/mcp-stdio.d.ts +0 -1
package/dist/index.d.ts
CHANGED
@@ -1,164 +1,19 @@
 export { createGateway, gateway } from '@ai-sdk/gateway';
-import
-
-
+import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
+import { Tool, InferToolInput, InferToolOutput, FlexibleSchema, InferSchema, AssistantModelMessage, ToolModelMessage, ReasoningPart, ModelMessage, SystemModelMessage, UserModelMessage, ProviderOptions, IdGenerator, ToolCall, MaybePromiseLike, TextPart, FilePart, Resolvable, FetchFunction, DataContent } from '@ai-sdk/provider-utils';
+export { AssistantContent, AssistantModelMessage, DataContent, FilePart, FlexibleSchema, IdGenerator, ImagePart, InferSchema, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolApprovalRequest, ToolApprovalResponse, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, dynamicTool, generateId, jsonSchema, parseJsonEventStream, tool, zodSchema } from '@ai-sdk/provider-utils';
 import * as _ai_sdk_provider from '@ai-sdk/provider';
-import { EmbeddingModelV3, EmbeddingModelV3Embedding,
-export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TooManyEmbeddingValuesForCallError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
-import * as z3 from 'zod/v3';
-import * as z4 from 'zod/v4';
-import { z } from 'zod/v4';
+import { EmbeddingModelV3, EmbeddingModelV2, EmbeddingModelV3Embedding, ImageModelV3, ImageModelV3CallWarning, ImageModelV3ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV3, LanguageModelV2, LanguageModelV3FinishReason, LanguageModelV3CallWarning, LanguageModelV3Source, LanguageModelV3Middleware, EmbeddingModelV3Middleware, RerankingModelV3, SharedV3ProviderMetadata, SpeechModelV3, SpeechModelV2, SpeechModelV3CallWarning, TranscriptionModelV3, TranscriptionModelV2, TranscriptionModelV3CallWarning, LanguageModelV3Usage, ImageModelV3Usage, LanguageModelV3CallOptions, AISDKError, LanguageModelV3ToolCall, JSONSchema7, JSONParseError, TypeValidationError, JSONObject, SharedV3Warning, EmbeddingModelCallOptions, ProviderV3, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
+export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError, LoadSettingError, NoContentGeneratedError, NoSuchModelError, TooManyEmbeddingValuesForCallError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
 import { ServerResponse } from 'node:http';
+import { AttributeValue, Tracer } from '@opentelemetry/api';
 import { ServerResponse as ServerResponse$1 } from 'http';
-
-type CallSettings = {
-/**
-Maximum number of tokens to generate.
-*/
-maxOutputTokens?: number;
-/**
-Temperature setting. The range depends on the provider and model.
-
-It is recommended to set either `temperature` or `topP`, but not both.
-*/
-temperature?: number;
-/**
-Nucleus sampling. This is a number between 0 and 1.
-
-E.g. 0.1 would mean that only tokens with the top 10% probability mass
-are considered.
-
-It is recommended to set either `temperature` or `topP`, but not both.
-*/
-topP?: number;
-/**
-Only sample from the top K options for each subsequent token.
-
-Used to remove "long tail" low probability responses.
-Recommended for advanced use cases only. You usually only need to use temperature.
-*/
-topK?: number;
-/**
-Presence penalty setting. It affects the likelihood of the model to
-repeat information that is already in the prompt.
-
-The presence penalty is a number between -1 (increase repetition)
-and 1 (maximum penalty, decrease repetition). 0 means no penalty.
-*/
-presencePenalty?: number;
-/**
-Frequency penalty setting. It affects the likelihood of the model
-to repeatedly use the same words or phrases.
-
-The frequency penalty is a number between -1 (increase repetition)
-and 1 (maximum penalty, decrease repetition). 0 means no penalty.
-*/
-frequencyPenalty?: number;
-/**
-Stop sequences.
-If set, the model will stop generating text when one of the stop sequences is generated.
-Providers may have limits on the number of stop sequences.
-*/
-stopSequences?: string[];
-/**
-The seed (integer) to use for random sampling. If set and supported
-by the model, calls will generate deterministic results.
-*/
-seed?: number;
-/**
-Maximum number of retries. Set to 0 to disable retries.
-
-@default 2
-*/
-maxRetries?: number;
-/**
-Abort signal.
-*/
-abortSignal?: AbortSignal;
-/**
-Additional HTTP headers to be sent with the request.
-Only applicable for HTTP-based providers.
-*/
-headers?: Record<string, string | undefined>;
-};
-
-/**
-Prompt part of the AI function options.
-It contains a system message, a simple text prompt, or a list of messages.
-*/
-type Prompt = {
-/**
-System message to include in the prompt. Can be used with `prompt` or `messages`.
-*/
-system?: string;
-} & ({
-/**
-A prompt. It can be either a text prompt or a list of messages.
-
-You can either use `prompt` or `messages` but not both.
-*/
-prompt: string | Array<ModelMessage>;
-/**
-A list of messages.
-
-You can either use `prompt` or `messages` but not both.
-*/
-messages?: never;
-} | {
-/**
-A list of messages.
-
-You can either use `prompt` or `messages` but not both.
-*/
-messages: Array<ModelMessage>;
-/**
-A prompt. It can be either a text prompt or a list of messages.
-
-You can either use `prompt` or `messages` but not both.
-*/
-prompt?: never;
-});
-
-/**
- * Telemetry configuration.
- */
-type TelemetrySettings = {
-/**
- * Enable or disable telemetry. Disabled by default while experimental.
- */
-isEnabled?: boolean;
-/**
- * Enable or disable input recording. Enabled by default.
- *
- * You might want to disable input recording to avoid recording sensitive
- * information, to reduce data transfers, or to increase performance.
- */
-recordInputs?: boolean;
-/**
- * Enable or disable output recording. Enabled by default.
- *
- * You might want to disable output recording to avoid recording sensitive
- * information, to reduce data transfers, or to increase performance.
- */
-recordOutputs?: boolean;
-/**
- * Identifier for this function. Used to group telemetry data by function.
- */
-functionId?: string;
-/**
- * Additional information to include in the telemetry data.
- */
-metadata?: Record<string, AttributeValue>;
-/**
- * A custom tracer to use for the telemetry data.
- */
-tracer?: Tracer;
-};
+import { z } from 'zod/v4';
 
 /**
 Embedding model that is used by the AI SDK Core functions.
 */
-type EmbeddingModel<VALUE = string> = string | EmbeddingModelV3<VALUE>;
+type EmbeddingModel<VALUE = string> = string | EmbeddingModelV3<VALUE> | EmbeddingModelV2<VALUE>;
 /**
 Embedding.
 */
@@ -167,16 +22,16 @@ type Embedding = EmbeddingModelV3Embedding;
 /**
 Image model that is used by the AI SDK Core functions.
 */
-type ImageModel =
+type ImageModel = ImageModelV3;
 /**
 Warning from the model provider for this call. The call will proceed, but e.g.
 some settings might not be supported, which can lead to suboptimal results.
 */
-type ImageGenerationWarning =
+type ImageGenerationWarning = ImageModelV3CallWarning;
 /**
 Metadata from the model provider for this call
 */
-type ImageModelProviderMetadata =
+type ImageModelProviderMetadata = ImageModelV3ProviderMetadata;
 
 type ImageModelResponseMetadata = {
 /**
@@ -198,7 +53,7 @@ type JSONValue = JSONValue$1;
 /**
 Language model that is used by the AI SDK Core functions.
 */
-type LanguageModel = string | LanguageModelV2;
+type LanguageModel = string | LanguageModelV3 | LanguageModelV2;
 /**
 Reason why a language model finished generating a response.
 
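The `LanguageModel` union above now accepts V3 model instances alongside V2 instances and plain string ids. A minimal sketch of the string form, which the SDK resolves through the default gateway provider (the model id is a placeholder, not a recommendation):

```ts
import { generateText } from 'ai';

// Plain string ids are resolved through the default gateway provider;
// passing a LanguageModelV3/V2 instance from a provider package also works.
const { text } = await generateText({
  model: 'openai/gpt-4o-mini', // placeholder id
  prompt: 'Summarize the v6 changes in one sentence.',
});
```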
@@ -210,16 +65,16 @@ Can be one of the following:
 - `error`: model stopped because of an error
 - `other`: model stopped for other reasons
 */
-type FinishReason =
+type FinishReason = LanguageModelV3FinishReason;
 /**
 Warning from the model provider for this call. The call will proceed, but e.g.
 some settings might not be supported, which can lead to suboptimal results.
 */
-type CallWarning =
+type CallWarning = LanguageModelV3CallWarning;
 /**
 A source that has been used as input to generate the response.
 */
-type Source =
+type Source = LanguageModelV3Source;
 /**
 Tool choice for the generation. It supports the following settings:
 
@@ -233,7 +88,9 @@ type ToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'requ
 toolName: Extract<keyof TOOLS, string>;
 };
 
-type LanguageModelMiddleware =
+type LanguageModelMiddleware = LanguageModelV3Middleware;
+
+type EmbeddingModelMiddleware = EmbeddingModelV3Middleware;
 
 type LanguageModelRequestMetadata = {
 /**
@@ -261,6 +118,11 @@ type LanguageModelResponseMetadata = {
 headers?: Record<string, string>;
 };
 
+/**
+ * Reranking model that is used by the AI SDK Core functions.
+ */
+type RerankingModel = RerankingModelV3;
+
 /**
 * Provider for language, text embedding, and image models.
 */
@@ -296,6 +158,17 @@ type Provider = {
 @returns {ImageModel} The image model associated with the id
 */
 imageModel(modelId: string): ImageModel;
+/**
+Returns the reranking model with the given id.
+The model id is then passed to the provider function to get the model.
+
+@param {string} id - The id of the model to return.
+
+@return {RerankingModel<VALUE>} The reranking model associated with the id
+
+@throws {NoSuchModelError} If no such model exists.
+*/
+rerankingModel(modelId: string): RerankingModel;
 };
 
 /**
@@ -304,17 +177,17 @@ Additional provider-specific metadata that is returned from the provider.
 This is needed to enable provider-specific functionality that can be
 fully encapsulated in the provider.
 */
-type ProviderMetadata =
+type ProviderMetadata = SharedV3ProviderMetadata;
 
 /**
 Speech model that is used by the AI SDK Core functions.
 */
-type SpeechModel = SpeechModelV2;
+type SpeechModel = string | SpeechModelV3 | SpeechModelV2;
 /**
 Warning from the model provider for this call. The call will proceed, but e.g.
 some settings might not be supported, which can lead to suboptimal results.
 */
-type SpeechWarning =
+type SpeechWarning = SpeechModelV3CallWarning;
 
 type SpeechModelResponseMetadata = {
 /**
@@ -338,12 +211,12 @@ type SpeechModelResponseMetadata = {
 /**
 Transcription model that is used by the AI SDK Core functions.
 */
-type TranscriptionModel = TranscriptionModelV2;
+type TranscriptionModel = string | TranscriptionModelV3 | TranscriptionModelV2;
 /**
 Warning from the model provider for this call. The call will proceed, but e.g.
 some settings might not be supported, which can lead to suboptimal results.
 */
-type TranscriptionWarning =
+type TranscriptionWarning = TranscriptionModelV3CallWarning;
 
 type TranscriptionModelResponseMetadata = {
 /**
@@ -363,7 +236,7 @@ type TranscriptionModelResponseMetadata = {
 /**
 Represents the number of tokens used in a prompt and completion.
 */
-type LanguageModelUsage =
+type LanguageModelUsage = LanguageModelV3Usage;
 /**
 Represents the number of tokens used in an embedding.
 */
@@ -373,31 +246,10 @@ type EmbeddingModelUsage = {
 */
 tokens: number;
 };
-
 /**
-
- *
- * Download function. Called with the array of URLs and a boolean indicating
- * whether the URL is supported by the model.
- *
- * The download function can decide for each URL:
- * - to return null (which means that the URL should be passed to the model)
- * - to download the asset and return the data (incl. retries, authentication, etc.)
- *
- * Should throw DownloadError if the download fails.
- *
- * Should return an array of objects sorted by the order of the requested downloads.
- * For each object, the data should be a Uint8Array if the URL was downloaded.
- * For each object, the mediaType should be the media type of the downloaded asset.
- * For each object, the data should be null if the URL should be passed through as is.
+Usage information for an image model call.
 */
-type
-url: URL;
-isUrlSupportedByModel: boolean;
-}>) => PromiseLike<Array<{
-data: Uint8Array;
-mediaType: string | undefined;
-} | null>>;
+type ImageModelUsage = ImageModelV3Usage;
 
 /**
 * A generated file.
@@ -419,23 +271,6 @@ interface GeneratedFile {
 readonly mediaType: string;
 }
 
-/**
- * Reasoning output of a text generation. It contains a reasoning.
- */
-interface ReasoningOutput {
-type: 'reasoning';
-/**
- * The reasoning text.
- */
-text: string;
-/**
- * Additional provider-specific metadata. They are passed through
- * to the provider from the AI SDK and enable provider-specific
- * functionality that can be fully encapsulated in the provider.
- */
-providerMetadata?: ProviderMetadata;
-}
-
 /**
 Create a union of the given object's values, and optionally specify which keys to get the values from.
 
@@ -478,29 +313,29 @@ onlyBar('bar');
 */
 type ValueOf<ObjectType, ValueType extends keyof ObjectType = keyof ObjectType> = ObjectType[ValueType];
 
-type ToolSet = Record<string, (Tool<never, never> | Tool<any, any> | Tool<any, never> | Tool<never, any>) & Pick<Tool<any, any>, 'execute' | 'onInputAvailable' | 'onInputStart' | 'onInputDelta'>>;
+type ToolSet = Record<string, (Tool<never, never> | Tool<any, any> | Tool<any, never> | Tool<never, any>) & Pick<Tool<any, any>, 'execute' | 'onInputAvailable' | 'onInputStart' | 'onInputDelta' | 'needsApproval'>>;
 
+type BaseToolCall = {
+type: 'tool-call';
+toolCallId: string;
+providerExecuted?: boolean;
+providerMetadata?: ProviderMetadata;
+};
 type StaticToolCall<TOOLS extends ToolSet> = ValueOf<{
-[NAME in keyof TOOLS]: {
-type: 'tool-call';
-toolCallId: string;
+[NAME in keyof TOOLS]: BaseToolCall & {
 toolName: NAME & string;
 input: TOOLS[NAME] extends Tool<infer PARAMETERS> ? PARAMETERS : never;
-providerExecuted?: boolean;
 dynamic?: false | undefined;
 invalid?: false | undefined;
 error?: never;
-
+title?: string;
 };
 }>;
-type DynamicToolCall = {
-type: 'tool-call';
-toolCallId: string;
+type DynamicToolCall = BaseToolCall & {
 toolName: string;
 input: unknown;
-providerExecuted?: boolean;
 dynamic: true;
-
+title?: string;
 /**
 * True if this is caused by an unparsable tool call or
 * a tool that does not exist.
@@ -513,6 +348,40 @@ type DynamicToolCall = {
 };
 type TypedToolCall<TOOLS extends ToolSet> = StaticToolCall<TOOLS> | DynamicToolCall;
 
+/**
+ * Output part that indicates that a tool approval request has been made.
+ *
+ * The tool approval request can be approved or denied in the next tool message.
+ */
+type ToolApprovalRequestOutput<TOOLS extends ToolSet> = {
+type: 'tool-approval-request';
+/**
+ * ID of the tool approval request.
+ */
+approvalId: string;
+/**
+ * Tool call that the approval request is for.
+ */
+toolCall: TypedToolCall<TOOLS>;
+};
+
+/**
+ * Reasoning output of a text generation. It contains a reasoning.
+ */
+interface ReasoningOutput {
+type: 'reasoning';
+/**
+ * The reasoning text.
+ */
+text: string;
+/**
+ * Additional provider-specific metadata. They are passed through
+ * to the provider from the AI SDK and enable provider-specific
+ * functionality that can be fully encapsulated in the provider.
+ */
+providerMetadata?: ProviderMetadata;
+}
+
 type StaticToolError<TOOLS extends ToolSet> = ValueOf<{
 [NAME in keyof TOOLS]: {
 type: 'tool-error';
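Tools can now opt into an approval step: `ToolSet` picks up the new `needsApproval` property, approval requests surface as `tool-approval-request` content parts, and the approval is answered in the next tool message. A hedged sketch of how this might be wired up (the tool, the model id, and the runtime behavior are illustrative assumptions based only on the declarations above):

```ts
import { generateText, tool } from 'ai';
import { z } from 'zod';

// Hypothetical tool that asks for approval before it runs.
const deleteFile = tool({
  description: 'Delete a file from the workspace',
  inputSchema: z.object({ path: z.string() }),
  needsApproval: true, // new ToolSet property surfaced in this diff
  execute: async ({ path }) => ({ deleted: path }),
});

const result = await generateText({
  model: 'openai/gpt-4o-mini', // placeholder model id
  tools: { deleteFile },
  prompt: 'Remove the temp file at /tmp/a.txt',
});

// Pending approvals appear as 'tool-approval-request' content parts.
const approvalRequests = result.content.filter(
  part => part.type === 'tool-approval-request',
);
```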
@@ -521,7 +390,9 @@ type StaticToolError<TOOLS extends ToolSet> = ValueOf<{
 input: InferToolInput<TOOLS[NAME]>;
 error: unknown;
 providerExecuted?: boolean;
+providerMetadata?: ProviderMetadata;
 dynamic?: false | undefined;
+title?: string;
 };
 }>;
 type DynamicToolError = {
@@ -531,7 +402,9 @@ type DynamicToolError = {
 input: unknown;
 error: unknown;
 providerExecuted?: boolean;
+providerMetadata?: ProviderMetadata;
 dynamic: true;
+title?: string;
 };
 type TypedToolError<TOOLS extends ToolSet> = StaticToolError<TOOLS> | DynamicToolError;
 
@@ -543,8 +416,10 @@ type StaticToolResult<TOOLS extends ToolSet> = ValueOf<{
 input: InferToolInput<TOOLS[NAME]>;
 output: InferToolOutput<TOOLS[NAME]>;
 providerExecuted?: boolean;
+providerMetadata?: ProviderMetadata;
 dynamic?: false | undefined;
 preliminary?: boolean;
+title?: string;
 };
 }>;
 type DynamicToolResult = {
@@ -554,8 +429,10 @@ type DynamicToolResult = {
 input: unknown;
 output: unknown;
 providerExecuted?: boolean;
+providerMetadata?: ProviderMetadata;
 dynamic: true;
 preliminary?: boolean;
+title?: string;
 };
 type TypedToolResult<TOOLS extends ToolSet> = StaticToolResult<TOOLS> | DynamicToolResult;
 
@@ -581,18 +458,133 @@ type ContentPart<TOOLS extends ToolSet> = {
 type: 'tool-error';
 } & TypedToolError<TOOLS> & {
 providerMetadata?: ProviderMetadata;
-})
+}) | ToolApprovalRequestOutput<TOOLS>;
 
 /**
-
-
-
-type ResponseMessage = AssistantModelMessage | ToolModelMessage;
+Create a type from an object with all keys and nested keys set to optional.
+The helper supports normal objects and schemas (which are resolved automatically).
+It always recurses into arrays.
 
-
- * The result of a single step in the generation process.
+Adopted from [type-fest](https://github.com/sindresorhus/type-fest/tree/main) PartialDeep.
 */
-type
+type DeepPartial<T> = T extends FlexibleSchema ? DeepPartialInternal<InferSchema<T>> : DeepPartialInternal<T>;
+type DeepPartialInternal<T> = T extends null | undefined | string | number | boolean | symbol | bigint | void | Date | RegExp | ((...arguments_: any[]) => unknown) | (new (...arguments_: any[]) => unknown) ? T : T extends Map<infer KeyType, infer ValueType> ? PartialMap<KeyType, ValueType> : T extends Set<infer ItemType> ? PartialSet<ItemType> : T extends ReadonlyMap<infer KeyType, infer ValueType> ? PartialReadonlyMap<KeyType, ValueType> : T extends ReadonlySet<infer ItemType> ? PartialReadonlySet<ItemType> : T extends object ? T extends ReadonlyArray<infer ItemType> ? ItemType[] extends T ? readonly ItemType[] extends T ? ReadonlyArray<DeepPartialInternal<ItemType | undefined>> : Array<DeepPartialInternal<ItemType | undefined>> : PartialObject<T> : PartialObject<T> : unknown;
+type PartialMap<KeyType, ValueType> = {} & Map<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
+type PartialSet<T> = {} & Set<DeepPartialInternal<T>>;
+type PartialReadonlyMap<KeyType, ValueType> = {} & ReadonlyMap<DeepPartialInternal<KeyType>, DeepPartialInternal<ValueType>>;
+type PartialReadonlySet<T> = {} & ReadonlySet<DeepPartialInternal<T>>;
+type PartialObject<ObjectType extends object> = {
+[KeyType in keyof ObjectType]?: DeepPartialInternal<ObjectType[KeyType]>;
+};
+
+interface Output<OUTPUT = any, PARTIAL = any> {
+/**
+ * The response format to use for the model.
+ */
+responseFormat: PromiseLike<LanguageModelV3CallOptions['responseFormat']>;
+/**
+ * Parses the complete output of the model.
+ */
+parseCompleteOutput(options: {
+text: string;
+}, context: {
+response: LanguageModelResponseMetadata;
+usage: LanguageModelUsage;
+finishReason: FinishReason;
+}): Promise<OUTPUT>;
+/**
+ * Parses the partial output of the model.
+ */
+parsePartialOutput(options: {
+text: string;
+}): Promise<{
+partial: PARTIAL;
+} | undefined>;
+}
+/**
+ * Output specification for text generation.
+ * This is the default output mode that generates plain text.
+ *
+ * @returns An output specification for generating text.
+ */
+declare const text: () => Output<string, string>;
+/**
+ * Output specification for typed object generation using schemas.
+ * When the model generates a text response, it will return an object that matches the schema.
+ *
+ * @param schema - The schema of the object to generate.
+ *
+ * @returns An output specification for generating objects with the specified schema.
+ */
+declare const object: <OBJECT>({ schema: inputSchema, }: {
+schema: FlexibleSchema<OBJECT>;
+}) => Output<OBJECT, DeepPartial<OBJECT>>;
+/**
+ * Output specification for array generation.
+ * When the model generates a text response, it will return an array of elements.
+ *
+ * @param element - The schema of the array elements to generate.
+ *
+ * @returns An output specification for generating an array of elements.
+ */
+declare const array: <ELEMENT>({ element: inputElementSchema, }: {
+element: FlexibleSchema<ELEMENT>;
+}) => Output<Array<ELEMENT>, Array<ELEMENT>>;
+/**
+ * Output specification for choice generation.
+ * When the model generates a text response, it will return a one of the choice options.
+ *
+ * @param options - The available choices.
+ *
+ * @returns An output specification for generating a choice.
+ */
+declare const choice: <CHOICE extends string>({ options: choiceOptions, }: {
+options: Array<CHOICE>;
+}) => Output<CHOICE, CHOICE>;
+/**
+ * Output specification for unstructured JSON generation.
+ * When the model generates a text response, it will return a JSON object.
+ *
+ * @returns An output specification for generating JSON.
+ */
+declare const json: () => Output<JSONValue$1, JSONValue$1>;
+
+type output_Output<OUTPUT = any, PARTIAL = any> = Output<OUTPUT, PARTIAL>;
+declare const output_array: typeof array;
+declare const output_choice: typeof choice;
+declare const output_json: typeof json;
+declare const output_object: typeof object;
+declare const output_text: typeof text;
+declare namespace output {
+export {
+output_Output as Output,
+output_array as array,
+output_choice as choice,
+output_json as json,
+output_object as object,
+output_text as text,
+};
+}
+
+/**
+ * Infers the complete output type from the output specification.
+ */
+type InferCompleteOutput<OUTPUT extends Output> = OUTPUT extends Output<infer COMPLETE_OUTPUT, any> ? COMPLETE_OUTPUT : never;
+/**
+ * Infers the partial output type from the output specification.
+ */
+type InferPartialOutput<OUTPUT extends Output> = OUTPUT extends Output<any, infer PARTIAL_OUTPUT> ? PARTIAL_OUTPUT : never;
+
+/**
+A message that was generated during the generation process.
+It can be either an assistant message or a tool message.
+*/
+type ResponseMessage = AssistantModelMessage | ToolModelMessage;
+
+/**
+ * The result of a single step in the generation process.
+ */
+type StepResult<TOOLS extends ToolSet> = {
 /**
 The content that was generated in the last step.
 */
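The `Output` specifications above replace the old two-member `output` namespace (`text`, `object`) with `text`, `object`, `array`, `choice`, and `json`, and `generateText`/`streamText` accept them through the now-stable `output` option. A minimal sketch, assuming the namespace keeps its public `Output` export name and using a placeholder model id:

```ts
import { generateText, Output } from 'ai';
import { z } from 'zod';

const result = await generateText({
  model: 'openai/gpt-4o-mini', // placeholder id
  // `output` supersedes `experimental_output`; Output.array/choice/json work the same way.
  output: Output.object({
    schema: z.object({ title: z.string(), tags: z.array(z.string()) }),
  }),
  prompt: 'Suggest a title and tags for a post about the AI SDK v6 beta.',
});

// `result.output` is typed via InferCompleteOutput<OUTPUT>.
console.log(result.output.title, result.output.tags);
```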
@@ -684,7 +676,7 @@ type StepResult<TOOLS extends ToolSet> = {
 The result of a `generateText` call.
 It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
 */
-interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
+interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
 /**
 The content that was generated in the last step.
 */
@@ -787,124 +779,251 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
 */
 readonly steps: Array<StepResult<TOOLS>>;
 /**
-The generated structured output. It uses the `
+The generated structured output. It uses the `output` specification.
+
+@deprecated Use `output` instead.
+*/
+readonly experimental_output: InferCompleteOutput<OUTPUT>;
+/**
+The generated structured output. It uses the `output` specification.
+
 */
-readonly
+readonly output: InferCompleteOutput<OUTPUT>;
 }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+type CallSettings = {
+/**
+Maximum number of tokens to generate.
+*/
+maxOutputTokens?: number;
+/**
+Temperature setting. The range depends on the provider and model.
+
+It is recommended to set either `temperature` or `topP`, but not both.
+*/
+temperature?: number;
+/**
+Nucleus sampling. This is a number between 0 and 1.
+
+E.g. 0.1 would mean that only tokens with the top 10% probability mass
+are considered.
+
+It is recommended to set either `temperature` or `topP`, but not both.
+*/
+topP?: number;
+/**
+Only sample from the top K options for each subsequent token.
+
+Used to remove "long tail" low probability responses.
+Recommended for advanced use cases only. You usually only need to use temperature.
+*/
+topK?: number;
+/**
+Presence penalty setting. It affects the likelihood of the model to
+repeat information that is already in the prompt.
+
+The presence penalty is a number between -1 (increase repetition)
+and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+*/
+presencePenalty?: number;
+/**
+Frequency penalty setting. It affects the likelihood of the model
+to repeatedly use the same words or phrases.
+
+The frequency penalty is a number between -1 (increase repetition)
+and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+*/
+frequencyPenalty?: number;
+/**
+Stop sequences.
+If set, the model will stop generating text when one of the stop sequences is generated.
+Providers may have limits on the number of stop sequences.
+*/
+stopSequences?: string[];
+/**
+The seed (integer) to use for random sampling. If set and supported
+by the model, calls will generate deterministic results.
+*/
+seed?: number;
+/**
+Maximum number of retries. Set to 0 to disable retries.
+
+@default 2
+*/
+maxRetries?: number;
+/**
+Abort signal.
+*/
+abortSignal?: AbortSignal;
+/**
+Additional HTTP headers to be sent with the request.
+Only applicable for HTTP-based providers.
+*/
+headers?: Record<string, string | undefined>;
 };
 
-interface Output<OUTPUT, PARTIAL> {
-readonly type: 'object' | 'text';
-responseFormat: LanguageModelV2CallOptions['responseFormat'];
-parsePartial(options: {
-text: string;
-}): Promise<{
-partial: PARTIAL;
-} | undefined>;
-parseOutput(options: {
-text: string;
-}, context: {
-response: LanguageModelResponseMetadata;
-usage: LanguageModelUsage;
-finishReason: FinishReason;
-}): Promise<OUTPUT>;
-}
-declare const text: () => Output<string, string>;
-declare const object: <OUTPUT>({ schema: inputSchema, }: {
-schema: z4.core.$ZodType<OUTPUT, any> | z3.Schema<OUTPUT, z3.ZodTypeDef, any> | Schema<OUTPUT>;
-}) => Output<OUTPUT, DeepPartial<OUTPUT>>;
-
-type output_Output<OUTPUT, PARTIAL> = Output<OUTPUT, PARTIAL>;
-declare const output_object: typeof object;
-declare const output_text: typeof text;
-declare namespace output {
-export {
-output_Output as Output,
-output_object as object,
-output_text as text,
-};
-}
-
 /**
-
-
-
-
-
-
-
-@returns An object that contains the settings for the step.
-If you return undefined (or for undefined settings), the settings from the outer level will be used.
-*/
-type PrepareStepFunction<TOOLS extends Record<string, Tool> = Record<string, Tool>> = (options: {
-steps: Array<StepResult<NoInfer<TOOLS>>>;
-stepNumber: number;
-model: LanguageModel;
-messages: Array<ModelMessage>;
-}) => PromiseLike<PrepareStepResult<TOOLS>> | PrepareStepResult<TOOLS>;
-type PrepareStepResult<TOOLS extends Record<string, Tool> = Record<string, Tool>> = {
-model?: LanguageModel;
-toolChoice?: ToolChoice<NoInfer<TOOLS>>;
-activeTools?: Array<keyof NoInfer<TOOLS>>;
+Prompt part of the AI function options.
+It contains a system message, a simple text prompt, or a list of messages.
+*/
+type Prompt = {
+/**
+System message to include in the prompt. Can be used with `prompt` or `messages`.
+*/
 system?: string;
-
-
-
-type StopCondition<TOOLS extends ToolSet> = (options: {
-steps: Array<StepResult<TOOLS>>;
-}) => PromiseLike<boolean> | boolean;
-declare function stepCountIs(stepCount: number): StopCondition<any>;
-declare function hasToolCall(toolName: string): StopCondition<any>;
+} & ({
+/**
+A prompt. It can be either a text prompt or a list of messages.
 
-
-
-
-
-
-constructor({ toolInput, toolName, cause, message, }: {
-message?: string;
-toolInput: string;
-toolName: string;
-cause: unknown;
-});
-static isInstance(error: unknown): error is InvalidToolInputError;
-}
+You can either use `prompt` or `messages` but not both.
+*/
+prompt: string | Array<ModelMessage>;
+/**
+A list of messages.
 
-
-
-
-
-
-
-toolName: string;
-availableTools?: string[] | undefined;
-message?: string;
-});
-static isInstance(error: unknown): error is NoSuchToolError;
-}
+You can either use `prompt` or `messages` but not both.
+*/
+messages?: never;
+} | {
+/**
+A list of messages.
 
-
-@deprecated Use `SystemModelMessage` instead.
+You can either use `prompt` or `messages` but not both.
 */
-
-
-
+messages: Array<ModelMessage>;
+/**
+A prompt. It can be either a text prompt or a list of messages.
+
+You can either use `prompt` or `messages` but not both.
+*/
+prompt?: never;
+});
+
+/**
+ * Telemetry configuration.
+ */
+type TelemetrySettings = {
+/**
+ * Enable or disable telemetry. Disabled by default while experimental.
+ */
+isEnabled?: boolean;
+/**
+ * Enable or disable input recording. Enabled by default.
+ *
+ * You might want to disable input recording to avoid recording sensitive
+ * information, to reduce data transfers, or to increase performance.
+ */
+recordInputs?: boolean;
+/**
+ * Enable or disable output recording. Enabled by default.
+ *
+ * You might want to disable output recording to avoid recording sensitive
+ * information, to reduce data transfers, or to increase performance.
+ */
+recordOutputs?: boolean;
+/**
+ * Identifier for this function. Used to group telemetry data by function.
+ */
+functionId?: string;
+/**
+ * Additional information to include in the telemetry data.
+ */
+metadata?: Record<string, AttributeValue>;
+/**
+ * A custom tracer to use for the telemetry data.
+ */
+tracer?: Tracer;
+};
+
+/**
+ * Experimental. Can change in patch versions without warning.
+ *
+ * Download function. Called with the array of URLs and a boolean indicating
+ * whether the URL is supported by the model.
+ *
+ * The download function can decide for each URL:
+ * - to return null (which means that the URL should be passed to the model)
+ * - to download the asset and return the data (incl. retries, authentication, etc.)
+ *
+ * Should throw DownloadError if the download fails.
+ *
+ * Should return an array of objects sorted by the order of the requested downloads.
+ * For each object, the data should be a Uint8Array if the URL was downloaded.
+ * For each object, the mediaType should be the media type of the downloaded asset.
+ * For each object, the data should be null if the URL should be passed through as is.
+ */
+type DownloadFunction = (options: Array<{
+url: URL;
+isUrlSupportedByModel: boolean;
+}>) => PromiseLike<Array<{
+data: Uint8Array;
+mediaType: string | undefined;
+} | null>>;
+
+/**
+Function that you can use to provide different settings for a step.
+
+@param options - The options for the step.
+@param options.steps - The steps that have been executed so far.
+@param options.stepNumber - The number of the step that is being executed.
+@param options.model - The model that is being used.
+
+@returns An object that contains the settings for the step.
+If you return undefined (or for undefined settings), the settings from the outer level will be used.
+*/
+type PrepareStepFunction<TOOLS extends Record<string, Tool> = Record<string, Tool>> = (options: {
+steps: Array<StepResult<NoInfer<TOOLS>>>;
+stepNumber: number;
+model: LanguageModel;
+messages: Array<ModelMessage>;
+}) => PromiseLike<PrepareStepResult<TOOLS>> | PrepareStepResult<TOOLS>;
+type PrepareStepResult<TOOLS extends Record<string, Tool> = Record<string, Tool>> = {
+model?: LanguageModel;
+toolChoice?: ToolChoice<NoInfer<TOOLS>>;
+activeTools?: Array<keyof NoInfer<TOOLS>>;
+system?: string;
+messages?: Array<ModelMessage>;
+} | undefined;
+
+type StopCondition<TOOLS extends ToolSet> = (options: {
+steps: Array<StepResult<TOOLS>>;
+}) => PromiseLike<boolean> | boolean;
+declare function stepCountIs(stepCount: number): StopCondition<any>;
+declare function hasToolCall(toolName: string): StopCondition<any>;
+
+declare const symbol$d: unique symbol;
+declare class InvalidToolInputError extends AISDKError {
+private readonly [symbol$d];
+readonly toolName: string;
+readonly toolInput: string;
+constructor({ toolInput, toolName, cause, message, }: {
+message?: string;
+toolInput: string;
+toolName: string;
+cause: unknown;
+});
+static isInstance(error: unknown): error is InvalidToolInputError;
+}
+
+declare const symbol$c: unique symbol;
+declare class NoSuchToolError extends AISDKError {
+private readonly [symbol$c];
+readonly toolName: string;
+readonly availableTools: string[] | undefined;
+constructor({ toolName, availableTools, message, }: {
+toolName: string;
+availableTools?: string[] | undefined;
+message?: string;
+});
+static isInstance(error: unknown): error is NoSuchToolError;
+}
+
+/**
+@deprecated Use `SystemModelMessage` instead.
+*/
+type CoreSystemMessage = SystemModelMessage;
+declare const systemModelMessageSchema: z.ZodType<SystemModelMessage>;
+/**
 @deprecated Use `systemModelMessageSchema` instead.
 */
 declare const coreSystemMessageSchema: z.ZodType<SystemModelMessage, unknown, z.core.$ZodTypeInternals<SystemModelMessage, unknown>>;
@@ -961,13 +1080,13 @@ declare const coreMessageSchema: z.ZodType<CoreMessage>;
 type ToolCallRepairFunction<TOOLS extends ToolSet> = (options: {
 system: string | undefined;
 messages: ModelMessage[];
-toolCall:
+toolCall: LanguageModelV3ToolCall;
 tools: TOOLS;
 inputSchema: (options: {
 toolName: string;
-}) => JSONSchema7
+}) => PromiseLike<JSONSchema7>;
 error: NoSuchToolError | InvalidToolInputError;
-}) => Promise<
+}) => Promise<LanguageModelV3ToolCall | null>;
 
 /**
 Callback that is set using the `onStepFinish` option.
@@ -976,6 +1095,21 @@ Callback that is set using the `onStepFinish` option.
 */
 type GenerateTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => Promise<void> | void;
 /**
+Callback that is set using the `onFinish` option.
+
+@param event - The event that is passed to the callback.
+*/
+type GenerateTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
+/**
+Details for all steps.
+*/
+readonly steps: StepResult<TOOLS>[];
+/**
+Total usage for all steps. This is the sum of the usage of all steps.
+*/
+readonly totalUsage: LanguageModelUsage;
+}) => PromiseLike<void> | void;
+/**
 Generate a text and call tools for a given prompt using a language model.
 
 This function does not stream the output. If you want to stream the output, use `streamText` instead.
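`generateText` gains an `onFinish` callback (previously only `streamText` had one): it fires after the final step with the full step list and the summed usage. A minimal sketch with a placeholder model id:

```ts
import { generateText, stepCountIs } from 'ai';

await generateText({
  model: 'openai/gpt-4o-mini', // placeholder id
  prompt: 'Plan a three-step refactor of the logging module.',
  stopWhen: stepCountIs(3),
  onStepFinish: step => console.log('step done:', step.finishReason),
  // Fires once, after all steps: `steps` holds every StepResult,
  // `totalUsage` is the summed LanguageModelUsage.
  onFinish: ({ steps, totalUsage }) => {
    console.log(`finished after ${steps.length} step(s)`, totalUsage);
  },
});
```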
@@ -1017,11 +1151,12 @@ If set and supported by the model, calls will generate deterministic results.
 @param experimental_generateMessageId - Generate a unique ID for each message.
 
 @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
+@param onFinish - Callback that is called when all steps are finished and the response is complete.
 
 @returns
 A result object that contains the generated text, the results of the tool calls, and additional information.
 */
-declare function generateText<TOOLS extends ToolSet, OUTPUT =
+declare function generateText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string>>({ model: modelArg, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools, activeTools, experimental_prepareStep, prepareStep, experimental_repairToolCall: repairToolCall, experimental_download: download, experimental_context, _internal: { generateId, currentDate, }, onStepFinish, onFinish, ...settings }: CallSettings & Prompt & {
 /**
 The language model to use.
 */
@@ -1063,7 +1198,13 @@ changing the tool call and result types in the result.
 /**
 Optional specification for parsing structured outputs from the LLM response.
 */
-
+output?: OUTPUT;
+/**
+Optional specification for parsing structured outputs from the LLM response.
+
+@deprecated Use `output` instead.
+*/
+experimental_output?: OUTPUT;
 /**
 Custom download function to use for URLs.
 
@@ -1083,9 +1224,13 @@ A function that attempts to repair a tool call that failed to parse.
 */
 experimental_repairToolCall?: ToolCallRepairFunction<NoInfer<TOOLS>>;
 /**
-
-
+ * Callback that is called when each step (LLM call) is finished, including intermediate steps.
+ */
 onStepFinish?: GenerateTextOnStepFinishCallback<NoInfer<TOOLS>>;
+/**
+ * Callback that is called when all steps are finished and the response is complete.
+ */
+onFinish?: GenerateTextOnFinishCallback<NoInfer<TOOLS>>;
 /**
 * Context that is passed into tool execution.
 *
@@ -1103,6 +1248,26 @@ A function that attempts to repair a tool call that failed to parse.
 };
 }): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
 
+/**
+ * Prunes model messages from a list of model messages.
+ *
+ * @param messages - The list of model messages to prune.
+ * @param reasoning - How to remove reasoning content from assistant messages. Default is `'none'`.
+ * @param toolCalls - How to prune tool call/results/approval content. Default is `[]`.
+ * @param emptyMessages - Whether to keep or remove messages whose content is empty after pruning. Default is `'remove'`.
+ *
+ * @returns The pruned list of model messages.
+ */
+declare function pruneMessages({ messages, reasoning, toolCalls, emptyMessages, }: {
+messages: ModelMessage[];
+reasoning?: 'all' | 'before-last-message' | 'none';
+toolCalls?: 'all' | 'before-last-message' | `before-last-${number}-messages` | 'none' | Array<{
+type: 'all' | 'before-last-message' | `before-last-${number}-messages`;
+tools?: string[];
+}>;
+emptyMessages?: 'keep' | 'remove';
+}): ModelMessage[];
+
 /**
 * Detects the first chunk in a buffer.
 *
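`pruneMessages` is new in this release. Going by the signature above, it trims reasoning parts, tool call/result/approval content, and (optionally) the messages left empty afterwards. A sketch of one plausible use, trimming an agent's history between steps; the option values shown are assumptions about how the declared unions behave:

```ts
import { pruneMessages, type ModelMessage } from 'ai';

declare const history: ModelMessage[]; // e.g. accumulated agent messages

const pruned = pruneMessages({
  messages: history,
  reasoning: 'all',                  // drop reasoning from every assistant message
  toolCalls: 'before-last-message',  // keep only the most recent tool activity
  emptyMessages: 'remove',           // drop messages that end up empty
});
```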
@@ -1229,18 +1394,15 @@ If set and supported by the model, calls will generate deterministic results.
 @param abortSignal - An optional abort signal that can be used to cancel the call.
 @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
 
-@param maxSteps - Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
-
 @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
 @param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
 @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
-@param onFinish - Callback that is called when
-(for tools that have an `execute` function) are finished.
+@param onFinish - Callback that is called when all steps are finished and the response is complete.
 
 @return
 A result object for accessing different stream types and additional information.
 */
-declare function streamText<TOOLS extends ToolSet, OUTPUT =
+declare function streamText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, prepareStep, providerOptions, experimental_activeTools, activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, experimental_download: download, includeRawChunks, onChunk, onError, onFinish, onAbort, onStepFinish, experimental_context, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
 /**
 The language model to use.
 */
@@ -1282,7 +1444,13 @@ functionality that can be fully encapsulated in the provider.
 /**
 Optional specification for parsing structured outputs from the LLM response.
 */
-
+output?: OUTPUT;
+/**
+Optional specification for parsing structured outputs from the LLM response.
+
+@deprecated Use `output` instead.
+*/
+experimental_output?: OUTPUT;
 /**
 Optional function that you can use to provide different settings for a step.
 
@@ -1357,7 +1525,24 @@ Internal. For test use only. May change without notice.
 generateId?: IdGenerator;
 currentDate?: () => Date;
 };
-}): StreamTextResult<TOOLS,
+}): StreamTextResult<TOOLS, OUTPUT>;
+
+/**
+ * Tool output when the tool execution has been denied (for static tools).
+ */
+type StaticToolOutputDenied<TOOLS extends ToolSet> = ValueOf<{
+[NAME in keyof TOOLS]: {
+type: 'tool-output-denied';
+toolCallId: string;
+toolName: NAME & string;
+providerExecuted?: boolean;
+dynamic?: false | undefined;
+};
+}>;
+/**
+ * Tool output when the tool execution has been denied.
+ */
+type TypedToolOutputDenied<TOOLS extends ToolSet> = StaticToolOutputDenied<TOOLS>;
 
 /**
 The data types that can be used in the UI message for the UI message data parts.
@@ -1506,42 +1691,95 @@ type DataUIPart<DATA_TYPES extends UIDataTypes> = ValueOf<{
};
}>;
type asUITool<TOOL extends UITool | Tool> = TOOL extends Tool ? InferUITool<TOOL> : TOOL;
+ /**
+ * Check if a message part is a data part.
+ */
+ declare function isDataUIPart<DATA_TYPES extends UIDataTypes>(part: UIMessagePart<DATA_TYPES, UITools>): part is DataUIPart<DATA_TYPES>;
/**
* A UI tool invocation contains all the information needed to render a tool invocation in the UI.
* It can be derived from a tool without knowing the tool name, and can be used to define
* UI components for the tool.
*/
type UIToolInvocation<TOOL extends UITool | Tool> = {
+ /**
+ * ID of the tool call.
+ */
toolCallId: string;
+ title?: string;
+ /**
+ * Whether the tool call was executed by the provider.
+ */
+ providerExecuted?: boolean;
} & ({
state: 'input-streaming';
input: DeepPartial<asUITool<TOOL>['input']> | undefined;
- providerExecuted?: boolean;
output?: never;
errorText?: never;
+ approval?: never;
} | {
state: 'input-available';
input: asUITool<TOOL>['input'];
- providerExecuted?: boolean;
output?: never;
errorText?: never;
callProviderMetadata?: ProviderMetadata;
+ approval?: never;
+ } | {
+ state: 'approval-requested';
+ input: asUITool<TOOL>['input'];
+ output?: never;
+ errorText?: never;
+ callProviderMetadata?: ProviderMetadata;
+ approval: {
+ id: string;
+ approved?: never;
+ reason?: never;
+ };
+ } | {
+ state: 'approval-responded';
+ input: asUITool<TOOL>['input'];
+ output?: never;
+ errorText?: never;
+ callProviderMetadata?: ProviderMetadata;
+ approval: {
+ id: string;
+ approved: boolean;
+ reason?: string;
+ };
} | {
state: 'output-available';
input: asUITool<TOOL>['input'];
output: asUITool<TOOL>['output'];
errorText?: never;
- providerExecuted?: boolean;
callProviderMetadata?: ProviderMetadata;
preliminary?: boolean;
+ approval?: {
+ id: string;
+ approved: true;
+ reason?: string;
+ };
} | {
state: 'output-error';
input: asUITool<TOOL>['input'] | undefined;
rawInput?: unknown;
output?: never;
errorText: string;
- providerExecuted?: boolean;
callProviderMetadata?: ProviderMetadata;
+ approval?: {
+ id: string;
+ approved: true;
+ reason?: string;
+ };
+ } | {
+ state: 'output-denied';
+ input: asUITool<TOOL>['input'];
+ output?: never;
+ errorText?: never;
+ callProviderMetadata?: ProviderMetadata;
+ approval: {
+ id: string;
+ approved: false;
+ reason?: string;
+ };
});
type ToolUIPart<TOOLS extends UITools = UITools> = ValueOf<{
[NAME in keyof TOOLS & string]: {
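The `UIToolInvocation` union gains `approval-requested`, `approval-responded`, and `output-denied` states. A hedged rendering sketch that switches over the states (the tool shape and renderer name are illustrative, not part of this diff):

```ts
import type { Tool, UIToolInvocation } from 'ai';

// Illustrative: a weather tool with `{ city: string }` input and string output.
function describeInvocation(inv: UIToolInvocation<Tool<{ city: string }, string>>): string {
  switch (inv.state) {
    case 'input-streaming':
      return `Preparing tool call ${inv.toolCallId}…`;
    case 'input-available':
      return `Calling tool for ${inv.input.city}`;
    case 'approval-requested':
      return `Waiting for approval (${inv.approval.id})`;
    case 'approval-responded':
      return inv.approval.approved ? 'Approved, executing…' : 'Denied by user';
    case 'output-available':
      return `Result: ${inv.output}`;
    case 'output-denied':
      return `Execution denied: ${inv.approval.reason ?? 'no reason given'}`;
    case 'output-error':
      return `Error: ${inv.errorText}`;
  }
}
```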
@@ -1550,19 +1788,54 @@ type ToolUIPart<TOOLS extends UITools = UITools> = ValueOf<{
}>;
type DynamicToolUIPart = {
type: 'dynamic-tool';
+ /**
+ * Name of the tool that is being called.
+ */
toolName: string;
+ /**
+ * ID of the tool call.
+ */
toolCallId: string;
+ title?: string;
+ /**
+ * Whether the tool call was executed by the provider.
+ */
+ providerExecuted?: boolean;
} & ({
state: 'input-streaming';
input: unknown | undefined;
output?: never;
errorText?: never;
+ approval?: never;
} | {
state: 'input-available';
input: unknown;
output?: never;
errorText?: never;
callProviderMetadata?: ProviderMetadata;
+ approval?: never;
+ } | {
+ state: 'approval-requested';
+ input: unknown;
+ output?: never;
+ errorText?: never;
+ callProviderMetadata?: ProviderMetadata;
+ approval: {
+ id: string;
+ approved?: never;
+ reason?: never;
+ };
+ } | {
+ state: 'approval-responded';
+ input: unknown;
+ output?: never;
+ errorText?: never;
+ callProviderMetadata?: ProviderMetadata;
+ approval: {
+ id: string;
+ approved: boolean;
+ reason?: string;
+ };
} | {
state: 'output-available';
input: unknown;
@@ -1570,13 +1843,46 @@ type DynamicToolUIPart = {
errorText?: never;
callProviderMetadata?: ProviderMetadata;
preliminary?: boolean;
+ approval?: {
+ id: string;
+ approved: true;
+ reason?: string;
+ };
} | {
state: 'output-error';
input: unknown;
output?: never;
errorText: string;
callProviderMetadata?: ProviderMetadata;
+ approval?: {
+ id: string;
+ approved: true;
+ reason?: string;
+ };
+ } | {
+ state: 'output-denied';
+ input: unknown;
+ output?: never;
+ errorText?: never;
+ callProviderMetadata?: ProviderMetadata;
+ approval: {
+ id: string;
+ approved: false;
+ reason?: string;
+ };
});
+ /**
+ * Type guard to check if a message part is a text part.
+ */
+ declare function isTextUIPart(part: UIMessagePart<UIDataTypes, UITools>): part is TextUIPart;
+ /**
+ * Type guard to check if a message part is a file part.
+ */
+ declare function isFileUIPart(part: UIMessagePart<UIDataTypes, UITools>): part is FileUIPart;
+ /**
+ * Type guard to check if a message part is a reasoning part.
+ */
+ declare function isReasoningUIPart(part: UIMessagePart<UIDataTypes, UITools>): part is ReasoningUIPart;
declare function isToolUIPart<TOOLS extends UITools>(part: UIMessagePart<UIDataTypes, TOOLS>): part is ToolUIPart<TOOLS>;
declare function isToolOrDynamicToolUIPart<TOOLS extends UITools>(part: UIMessagePart<UIDataTypes, TOOLS>): part is ToolUIPart<TOOLS> | DynamicToolUIPart;
declare function getToolName<TOOLS extends UITools>(part: ToolUIPart<TOOLS>): keyof TOOLS;
@@ -1594,121 +1900,132 @@ type InferUIMessageToolCall<UI_MESSAGE extends UIMessage> = ValueOf<{
dynamic: true;
});

- declare const uiMessageChunkSchema:
- type:
- id:
- providerMetadata
- }
- type:
- id:
- delta:
- providerMetadata
- }
- type:
- id:
- providerMetadata
- }
- type:
- errorText:
- }
- type:
- toolCallId:
- toolName:
- providerExecuted
- dynamic
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- }
- type:
- toolCallId:
-
- providerExecuted
- dynamic
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- providerMetadata
- }
- type:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- }
- type:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+ declare const uiMessageChunkSchema: _ai_sdk_provider_utils.LazySchema<{
+ type: "text-start";
+ id: string;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ } | {
+ type: "text-delta";
+ id: string;
+ delta: string;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ } | {
+ type: "text-end";
+ id: string;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ } | {
+ type: "error";
+ errorText: string;
+ } | {
+ type: "tool-input-start";
+ toolCallId: string;
+ toolName: string;
+ providerExecuted?: boolean | undefined;
+ dynamic?: boolean | undefined;
+ title?: string | undefined;
+ } | {
+ type: "tool-input-delta";
+ toolCallId: string;
+ inputTextDelta: string;
+ } | {
+ type: "tool-input-available";
+ toolCallId: string;
+ toolName: string;
+ input: unknown;
+ providerExecuted?: boolean | undefined;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ dynamic?: boolean | undefined;
+ title?: string | undefined;
+ } | {
+ type: "tool-input-error";
+ toolCallId: string;
+ toolName: string;
+ input: unknown;
+ errorText: string;
+ providerExecuted?: boolean | undefined;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ dynamic?: boolean | undefined;
+ title?: string | undefined;
+ } | {
+ type: "tool-approval-request";
+ approvalId: string;
+ toolCallId: string;
+ } | {
+ type: "tool-output-available";
+ toolCallId: string;
+ output: unknown;
+ providerExecuted?: boolean | undefined;
+ dynamic?: boolean | undefined;
+ preliminary?: boolean | undefined;
+ } | {
+ type: "tool-output-error";
+ toolCallId: string;
+ errorText: string;
+ providerExecuted?: boolean | undefined;
+ dynamic?: boolean | undefined;
+ } | {
+ type: "tool-output-denied";
+ toolCallId: string;
+ } | {
+ type: "reasoning-start";
+ id: string;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ } | {
+ type: "reasoning-delta";
+ id: string;
+ delta: string;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ } | {
+ type: "reasoning-end";
+ id: string;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ } | {
+ type: "source-url";
+ sourceId: string;
+ url: string;
+ title?: string | undefined;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ } | {
+ type: "source-document";
+ sourceId: string;
+ mediaType: string;
+ title: string;
+ filename?: string | undefined;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ } | {
+ type: "file";
+ url: string;
+ mediaType: string;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ } | {
+ type: `data-${string}`;
+ data: unknown;
+ id?: string | undefined;
+ transient?: boolean | undefined;
+ } | {
+ type: "start-step";
+ } | {
+ type: "finish-step";
+ } | {
+ type: "start";
+ messageId?: string | undefined;
+ messageMetadata?: unknown;
+ } | {
+ type: "finish";
+ finishReason?: "unknown" | "length" | "error" | "stop" | "content-filter" | "tool-calls" | "other" | undefined;
+ messageMetadata?: unknown;
+ } | {
+ type: "abort";
+ } | {
+ type: "message-metadata";
+ messageMetadata: unknown;
+ }>;
+ type DataUIMessageChunk<DATA_TYPES extends UIDataTypes> = ValueOf<{
+ [NAME in keyof DATA_TYPES & string]: {
+ type: `data-${NAME}`;
+ id?: string;
+ data: DATA_TYPES[NAME];
+ transient?: boolean;
};
}>;
type UIMessageChunk<METADATA = unknown, DATA_TYPES extends UIDataTypes = UIDataTypes> = {
@@ -1748,6 +2065,7 @@ type UIMessageChunk<METADATA = unknown, DATA_TYPES extends UIDataTypes = UIDataT
providerExecuted?: boolean;
providerMetadata?: ProviderMetadata;
dynamic?: boolean;
+ title?: string;
} | {
type: 'tool-input-error';
toolCallId: string;
@@ -1757,6 +2075,11 @@ type UIMessageChunk<METADATA = unknown, DATA_TYPES extends UIDataTypes = UIDataT
providerMetadata?: ProviderMetadata;
dynamic?: boolean;
errorText: string;
+ title?: string;
+ } | {
+ type: 'tool-approval-request';
+ approvalId: string;
+ toolCallId: string;
} | {
type: 'tool-output-available';
toolCallId: string;
@@ -1770,12 +2093,16 @@ type UIMessageChunk<METADATA = unknown, DATA_TYPES extends UIDataTypes = UIDataT
errorText: string;
providerExecuted?: boolean;
dynamic?: boolean;
+ } | {
+ type: 'tool-output-denied';
+ toolCallId: string;
} | {
type: 'tool-input-start';
toolCallId: string;
toolName: string;
providerExecuted?: boolean;
dynamic?: boolean;
+ title?: string;
} | {
type: 'tool-input-delta';
toolCallId: string;
@@ -1808,6 +2135,7 @@ type UIMessageChunk<METADATA = unknown, DATA_TYPES extends UIDataTypes = UIDataT
messageMetadata?: METADATA;
} | {
type: 'finish';
+ finishReason?: FinishReason;
messageMetadata?: METADATA;
} | {
type: 'abort';
@@ -1836,6 +2164,10 @@ type UIMessageStreamOnFinishCallback<UI_MESSAGE extends UIMessage> = (event: {
* (including the original message if it was extended).
*/
responseMessage: UI_MESSAGE;
+ /**
+ * The reason why the generation finished.
+ */
+ finishReason?: FinishReason;
}) => PromiseLike<void> | void;

type UIMessageStreamResponseInit = ResponseInit & {
@@ -1911,7 +2243,7 @@ type ConsumeStreamOptions = {
/**
A result object for accessing different stream types and additional information.
*/
- interface StreamTextResult<TOOLS extends ToolSet,
+ interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
/**
The content that was generated in the last step.
@@ -2059,9 +2391,19 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
*/
readonly fullStream: AsyncIterableStream<TextStreamPart<TOOLS>>;
/**
-
+ * A stream of partial outputs. It uses the `output` specification.
+ *
+ * @deprecated Use `partialOutputStream` instead.
+ */
+ readonly experimental_partialOutputStream: AsyncIterableStream<InferPartialOutput<OUTPUT>>;
+ /**
+ * A stream of partial parsed outputs. It uses the `output` specification.
*/
- readonly
+ readonly partialOutputStream: AsyncIterableStream<InferPartialOutput<OUTPUT>>;
+ /**
+ * The complete parsed output. It uses the `output` specification.
+ */
+ readonly output: Promise<InferCompleteOutput<OUTPUT>>;
/**
Consumes the stream without processing the parts.
This is useful to force the stream to finish.
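`StreamTextResult` now exposes `partialOutputStream` and a final `output` promise (the old `experimental_partialOutputStream` stays as a deprecated alias). A consumption sketch, with the model and schema as placeholders:

```ts
import { streamText, Output, type LanguageModel } from 'ai';
import { z } from 'zod';

async function run(model: LanguageModel) {
  const result = streamText({
    model,
    prompt: 'Name three large cities.',
    output: Output.object({ schema: z.object({ cities: z.array(z.string()) }) }),
  });

  // Partial parsed objects as they stream in.
  for await (const partial of result.partialOutputStream) {
    console.log('partial', partial);
  }

  // The complete parsed output, resolved once the stream finishes.
  console.log('final', await result.output);
}
```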
@@ -2072,47 +2414,28 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
*/
consumeStream(options?: ConsumeStreamOptions): Promise<void>;
/**
-
+ Converts the result to a UI message stream.

-
- @param options.sendUsage whether to send the usage information to the client. Defaults to true.
- @param options.sendReasoning whether to send the reasoning information to the client. Defaults to false.
- @param options.sendSources whether to send the sources information to the client. Defaults to false.
- @param options.experimental_sendFinish whether to send the finish information to the client. Defaults to true.
- @param options.experimental_sendStart whether to send the start information to the client. Defaults to true.
-
- @return A UI message stream.
+ @return A UI message stream.
*/
toUIMessageStream<UI_MESSAGE extends UIMessage>(options?: UIMessageStreamOptions<UI_MESSAGE>): AsyncIterableStream<InferUIMessageChunk<UI_MESSAGE>>;
/**
-
-
- @param options.status The status code.
- @param options.statusText The status text.
- @param options.headers The headers.
- @param options.getErrorMessage An optional function that converts an error to an error message.
- @param options.sendUsage Whether to send the usage information to the client. Defaults to true.
- @param options.sendReasoning Whether to send the reasoning information to the client. Defaults to false.
- */
+ *Writes UI message stream output to a Node.js response-like object.
+ */
pipeUIMessageStreamToResponse<UI_MESSAGE extends UIMessage>(response: ServerResponse, options?: UIMessageStreamResponseInit & UIMessageStreamOptions<UI_MESSAGE>): void;
/**
-
-
-
-
-
+ Writes text delta output to a Node.js response-like object.
+ It sets a `Content-Type` header to `text/plain; charset=utf-8` and
+ writes each text delta as a separate chunk.
+
+ @param response A Node.js response-like object (ServerResponse).
+ @param init Optional headers, status code, and status text.
*/
pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit): void;
/**
-
+ Converts the result to a streamed response object with a stream data part stream.

-
- @param options.statusText The status text.
- @param options.headers The headers.
- @param options.getErrorMessage An optional function that converts an error to an error message.
- @param options.sendUsage Whether to send the usage information to the client. Defaults to true.
- @param options.sendReasoning Whether to send the reasoning information to the client. Defaults to false.
- @return A response object.
+ @return A response object.
*/
toUIMessageStreamResponse<UI_MESSAGE extends UIMessage>(options?: UIMessageStreamResponseInit & UIMessageStreamOptions<UI_MESSAGE>): Response;
/**
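A typical use of the response helpers above, sketched as a minimal route handler (the model argument is a placeholder):

```ts
import { streamText, type LanguageModel } from 'ai';

export function handleChat(model: LanguageModel, prompt: string): Response {
  const result = streamText({ model, prompt });
  // Streams UI message chunks back to the client.
  return result.toUIMessageStreamResponse();
}
```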
@@ -2156,6 +2479,7 @@ type TextStreamPart<TOOLS extends ToolSet> = {
providerMetadata?: ProviderMetadata;
providerExecuted?: boolean;
dynamic?: boolean;
+ title?: string;
} | {
type: 'tool-input-end';
id: string;
@@ -2176,7 +2500,9 @@ type TextStreamPart<TOOLS extends ToolSet> = {
type: 'tool-result';
} & TypedToolResult<TOOLS>) | ({
type: 'tool-error';
- } & TypedToolError<TOOLS>) | {
+ } & TypedToolError<TOOLS>) | ({
+ type: 'tool-output-denied';
+ } & StaticToolOutputDenied<TOOLS>) | ToolApprovalRequestOutput<TOOLS> | {
type: 'start-step';
request: LanguageModelRequestMetadata;
warnings: CallWarning[];
@@ -2202,11 +2528,108 @@ type TextStreamPart<TOOLS extends ToolSet> = {
rawValue: unknown;
};

- type
+ type AgentCallParameters<CALL_OPTIONS> = ([CALL_OPTIONS] extends [never] ? {
+ options?: never;
+ } : {
+ options: CALL_OPTIONS;
+ }) & ({
/**
- *
+ * A prompt. It can be either a text prompt or a list of messages.
+ *
+ * You can either use `prompt` or `messages` but not both.
*/
-
+ prompt: string | Array<ModelMessage>;
+ /**
+ * A list of messages.
+ *
+ * You can either use `prompt` or `messages` but not both.
+ */
+ messages?: never;
+ } | {
+ /**
+ * A list of messages.
+ *
+ * You can either use `prompt` or `messages` but not both.
+ */
+ messages: Array<ModelMessage>;
+ /**
+ * A prompt. It can be either a text prompt or a list of messages.
+ *
+ * You can either use `prompt` or `messages` but not both.
+ */
+ prompt?: never;
+ }) & {
+ /**
+ * Abort signal.
+ */
+ abortSignal?: AbortSignal;
+ };
+ /**
+ * An Agent receives a prompt (text or messages) and generates or streams an output
+ * that consists of steps, tool calls, data parts, etc.
+ *
+ * You can implement your own Agent by implementing the `Agent` interface,
+ * or use the `ToolLoopAgent` class.
+ */
+ interface Agent<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never> {
+ /**
+ * The specification version of the agent interface. This will enable
+ * us to evolve the agent interface and retain backwards compatibility.
+ */
+ readonly version: 'agent-v1';
+ /**
+ * The id of the agent.
+ */
+ readonly id: string | undefined;
+ /**
+ * The tools that the agent can use.
+ */
+ readonly tools: TOOLS;
+ /**
+ * Generates an output from the agent (non-streaming).
+ */
+ generate(options: AgentCallParameters<CALL_OPTIONS>): PromiseLike<GenerateTextResult<TOOLS, OUTPUT>>;
+ /**
+ * Streams an output from the agent (streaming).
+ */
+ stream(options: AgentCallParameters<CALL_OPTIONS>): PromiseLike<StreamTextResult<TOOLS, OUTPUT>>;
+ }
+
+ /**
+ Callback that is set using the `onFinish` option.
+
+ @param event - The event that is passed to the callback.
+ */
+ type ToolLoopAgentOnFinishCallback<TOOLS extends ToolSet = {}> = (event: StepResult<TOOLS> & {
+ /**
+ Details for all steps.
+ */
+ readonly steps: StepResult<TOOLS>[];
+ /**
+ Total usage for all steps. This is the sum of the usage of all steps.
+ */
+ readonly totalUsage: LanguageModelUsage;
+ }) => PromiseLike<void> | void;
+
+ /**
+ Callback that is set using the `onStepFinish` option.
+
+ @param stepResult - The result of the step.
+ */
+ type ToolLoopAgentOnStepFinishCallback<TOOLS extends ToolSet = {}> = (stepResult: StepResult<TOOLS>) => Promise<void> | void;
+
+ /**
+ * Configuration options for an agent.
+ */
+ type ToolLoopAgentSettings<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never> = Omit<CallSettings, 'abortSignal'> & {
+ /**
+ * The id of the agent.
+ */
+ id?: string;
+ /**
+ * The instructions for the agent.
+ */
+ instructions?: string;
/**
The language model to use.
*/
@@ -2223,7 +2646,7 @@ type AgentSettings<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never
Condition for stopping the generation when there are tool results in the last step.
When the condition is an array, any of the conditions can be met to stop the generation.

- @default stepCountIs(
+ @default stepCountIs(20)
*/
stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
/**
@@ -2236,13 +2659,9 @@ type AgentSettings<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never
*/
activeTools?: Array<keyof NoInfer<TOOLS>>;
/**
- Optional specification for
- */
- experimental_output?: Output<OUTPUT, OUTPUT_PARTIAL>;
- /**
- * @deprecated Use `prepareStep` instead.
+ Optional specification for generating structured outputs.
*/
-
+ output?: OUTPUT;
/**
Optional function that you can use to provide different settings for a step.
*/
@@ -2252,9 +2671,19 @@ type AgentSettings<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never
*/
experimental_repairToolCall?: ToolCallRepairFunction<NoInfer<TOOLS>>;
/**
-
-
- onStepFinish?:
+ * Callback that is called when each step (LLM call) is finished, including intermediate steps.
+ */
+ onStepFinish?: ToolLoopAgentOnStepFinishCallback<NoInfer<TOOLS>>;
+ /**
+ * Callback that is called when all steps are finished and the response is complete.
+ */
+ onFinish?: ToolLoopAgentOnFinishCallback<NoInfer<TOOLS>>;
+ /**
+ Additional provider-specific options. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ providerOptions?: ProviderOptions;
/**
* Context that is passed into tool calls.
*
@@ -2264,222 +2693,924 @@ type AgentSettings<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never
*/
experimental_context?: unknown;
/**
- *
+ * The schema for the call options.
*/
-
-
-
-
+ callOptionsSchema?: FlexibleSchema<CALL_OPTIONS>;
+ /**
+ * Prepare the parameters for the generateText or streamText call.
+ *
+ * You can use this to have templates based on call options.
+ */
+ prepareCall?: (options: AgentCallParameters<CALL_OPTIONS> & Pick<ToolLoopAgentSettings<CALL_OPTIONS, TOOLS, OUTPUT>, 'model' | 'tools' | 'maxOutputTokens' | 'temperature' | 'topP' | 'topK' | 'presencePenalty' | 'frequencyPenalty' | 'stopSequences' | 'seed' | 'headers' | 'instructions' | 'stopWhen' | 'experimental_telemetry' | 'activeTools' | 'providerOptions' | 'experimental_context'>) => MaybePromiseLike<Pick<ToolLoopAgentSettings<CALL_OPTIONS, TOOLS, OUTPUT>, 'model' | 'tools' | 'maxOutputTokens' | 'temperature' | 'topP' | 'topK' | 'presencePenalty' | 'frequencyPenalty' | 'stopSequences' | 'seed' | 'headers' | 'instructions' | 'stopWhen' | 'experimental_telemetry' | 'activeTools' | 'providerOptions' | 'experimental_context'> & Omit<Prompt, 'system'>>;
};
-
+
+ /**
+ * A tool loop agent is an agent that runs tools in a loop. In each step,
+ * it calls the LLM, and if there are tool calls, it executes the tools
+ * and calls the LLM again in a new step with the tool results.
+ *
+ * The loop continues until:
+ * - A finish reasoning other than tool-calls is returned, or
+ * - A tool that is invoked does not have an execute function, or
+ * - A tool call needs approval, or
+ * - A stop condition is met (default stop condition is stepCountIs(20))
+ */
+ declare class ToolLoopAgent<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never> implements Agent<CALL_OPTIONS, TOOLS, OUTPUT> {
+ readonly version = "agent-v1";
private readonly settings;
- constructor(settings:
-
-
- /**
- Additional provider-specific metadata. They are passed through
- from the provider to the AI SDK and enable provider-specific
- results that can be fully encapsulated in the provider.
+ constructor(settings: ToolLoopAgentSettings<CALL_OPTIONS, TOOLS, OUTPUT>);
+ /**
+ * The id of the agent.
*/
-
-
-
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
- }): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
- stream(options: Prompt & {
- /**
- Additional provider-specific metadata. They are passed through
- from the provider to the AI SDK and enable provider-specific
- results that can be fully encapsulated in the provider.
+ get id(): string | undefined;
+ /**
+ * The tools that the agent can use.
*/
-
-
-
-
-
-
- providerOptions?: ProviderOptions;
- }): StreamTextResult<TOOLS, OUTPUT_PARTIAL>;
+ get tools(): TOOLS;
+ private prepareCall;
+ /**
+ * Generates an output from the agent (non-streaming).
+ */
+ generate({ abortSignal, ...options }: AgentCallParameters<CALL_OPTIONS>): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
/**
- *
+ * Streams an output from the agent (streaming).
*/
-
- messages: UIMessage<never, never, InferUITools<TOOLS>>[];
- }): Response;
+ stream({ abortSignal, ...options }: AgentCallParameters<CALL_OPTIONS>): Promise<StreamTextResult<TOOLS, OUTPUT>>;
}
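A hedged sketch of constructing and running a `ToolLoopAgent`. The weather tool, provider model, and export paths are illustrative; `tool` and `stepCountIs` are existing SDK helpers, and `ToolLoopAgent` is assumed to be exported as declared above.

```ts
import { ToolLoopAgent, stepCountIs, tool, type LanguageModel } from 'ai';
import { z } from 'zod';

// Hypothetical tool for illustration.
const weatherTool = tool({
  description: 'Get the weather for a city',
  inputSchema: z.object({ city: z.string() }),
  execute: async ({ city }) => `It is sunny in ${city}.`,
});

export async function runAgent(model: LanguageModel) {
  const agent = new ToolLoopAgent({
    model,
    instructions: 'You are a helpful weather assistant.',
    tools: { weather: weatherTool },
    stopWhen: stepCountIs(5),
  });

  const result = await agent.generate({ prompt: 'What is the weather in Berlin?' });
  console.log(result.text);
}
```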
-
+
/**
- * Infer the
+ * Infer the type of the tools of an agent.
*/
- type
+ type InferAgentTools<AGENT> = AGENT extends Agent<any, infer TOOLS, any> ? TOOLS : never;

/**
-
- It contains the embedding, the value, and additional information.
+ * Infer the UI message type of an agent.
*/
-
- /**
- The value that was embedded.
- */
- readonly value: VALUE;
- /**
- The embedding of the value.
- */
- readonly embedding: Embedding;
- /**
- The embedding token usage.
- */
- readonly usage: EmbeddingModelUsage;
- /**
- Optional provider-specific metadata.
- */
- readonly providerMetadata?: ProviderMetadata;
- /**
- Optional response data.
- */
- readonly response?: {
- /**
- Response headers.
- */
- headers?: Record<string, string>;
- /**
- The response body.
- */
- body?: unknown;
- };
- }
+ type InferAgentUIMessage<AGENT> = UIMessage<never, never, InferUITools<InferAgentTools<AGENT>>>;

/**
-
-
- @param
- @param
+ * Runs the agent and returns a response object with a UI message stream.
+ *
+ * @param agent - The agent to run.
+ * @param messages - The input UI messages.
+ *
+ * @returns The response object.
+ */
+ declare function createAgentUIStreamResponse<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never, MESSAGE_METADATA = unknown>({ headers, status, statusText, consumeSseStream, ...options }: {
+ agent: Agent<CALL_OPTIONS, TOOLS, OUTPUT>;
+ messages: unknown[];
+ options?: CALL_OPTIONS;
+ } & UIMessageStreamResponseInit & UIMessageStreamOptions<UIMessage<MESSAGE_METADATA, never, InferUITools<TOOLS>>>): Promise<Response>;
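A sketch of wiring an agent into a chat route with `createAgentUIStreamResponse`. The `./agent` module is hypothetical; it stands for wherever the `ToolLoopAgent` instance from the previous example is defined.

```ts
import { createAgentUIStreamResponse } from 'ai';
import { agent } from './agent'; // hypothetical module exporting a ToolLoopAgent

export async function POST(req: Request): Promise<Response> {
  const { messages } = await req.json();
  // Runs the agent against the UI messages and streams UI message chunks back.
  return createAgentUIStreamResponse({ agent, messages });
}
```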

-
-
-
+ declare const getOriginalFetch: () => typeof fetch;
+ declare function callCompletionApi({ api, prompt, credentials, headers, body, streamProtocol, setCompletion, setLoading, setError, setAbortController, onFinish, onError, fetch, }: {
+ api: string;
+ prompt: string;
+ credentials: RequestCredentials | undefined;
+ headers: HeadersInit | undefined;
+ body: Record<string, any>;
+ streamProtocol: 'data' | 'text' | undefined;
+ setCompletion: (completion: string) => void;
+ setLoading: (loading: boolean) => void;
+ setError: (error: Error | undefined) => void;
+ setAbortController: (abortController: AbortController | null) => void;
+ onFinish: ((prompt: string, completion: string) => void) | undefined;
+ onError: ((error: Error) => void) | undefined;
+ fetch: ReturnType<typeof getOriginalFetch> | undefined;
+ }): Promise<string | null | undefined>;

-
+ /**
+ * Transport interface for handling chat message communication and streaming.
+ *
+ * The `ChatTransport` interface provides fine-grained control over how messages
+ * are sent to API endpoints and how responses are processed. This enables
+ * alternative communication protocols like WebSockets, custom authentication
+ * patterns, or specialized backend integrations.
+ *
+ * @template UI_MESSAGE - The UI message type extending UIMessage
*/
-
- /**
- The embedding model to use.
- */
- model: EmbeddingModel<VALUE>;
+ interface ChatTransport<UI_MESSAGE extends UIMessage> {
/**
-
+ * Sends messages to the chat API endpoint and returns a streaming response.
+ *
+ * This method handles both new message submission and message regeneration.
+ * It supports real-time streaming of responses through UIMessageChunk events.
+ *
+ * @param options - Configuration object containing:
+ * @param options.trigger - The type of message submission:
+ * - `'submit-message'`: Submitting a new user message
+ * - `'regenerate-message'`: Regenerating an assistant response
+ * @param options.chatId - Unique identifier for the chat session
+ * @param options.messageId - ID of the message to regenerate (for regenerate-message trigger) or undefined for new messages
+ * @param options.messages - Array of UI messages representing the conversation history
+ * @param options.abortSignal - Signal to abort the request if needed
+ * @param options.headers - Additional HTTP headers to include in the request
+ * @param options.body - Additional JSON properties to include in the request body
+ * @param options.metadata - Custom metadata to attach to the request
+ *
+ * @returns Promise resolving to a ReadableStream of UIMessageChunk objects.
+ * The stream emits various chunk types like:
+ * - `text-start`, `text-delta`, `text-end`: For streaming text content
+ * - `tool-input-start`, `tool-input-delta`, `tool-input-available`: For tool calls
+ * - `data-part-start`, `data-part-delta`, `data-part-available`: For data parts
+ * - `error`: For error handling
+ *
+ * @throws Error when the API request fails or response is invalid
*/
-
+ sendMessages: (options: {
+ /** The type of message submission - either new message or regeneration */
+ trigger: 'submit-message' | 'regenerate-message';
+ /** Unique identifier for the chat session */
+ chatId: string;
+ /** ID of the message to regenerate, or undefined for new messages */
+ messageId: string | undefined;
+ /** Array of UI messages representing the conversation history */
+ messages: UI_MESSAGE[];
+ /** Signal to abort the request if needed */
+ abortSignal: AbortSignal | undefined;
+ } & ChatRequestOptions) => Promise<ReadableStream<UIMessageChunk>>;
/**
-
-
-
+ * Reconnects to an existing streaming response for the specified chat session.
+ *
+ * This method is used to resume streaming when a connection is interrupted
+ * or when resuming a chat session. It's particularly useful for maintaining
+ * continuity in long-running conversations or recovering from network issues.
+ *
+ * @param options - Configuration object containing:
+ * @param options.chatId - Unique identifier for the chat session to reconnect to
+ * @param options.headers - Additional HTTP headers to include in the reconnection request
+ * @param options.body - Additional JSON properties to include in the request body
+ * @param options.metadata - Custom metadata to attach to the request
+ *
+ * @returns Promise resolving to:
+ * - `ReadableStream<UIMessageChunk>`: If an active stream is found and can be resumed
+ * - `null`: If no active stream exists for the specified chat session (e.g., response already completed)
+ *
+ * @throws Error when the reconnection request fails or response is invalid
*/
-
-
-
-
-
-
-
-
-
-
+ reconnectToStream: (options: {
+ /** Unique identifier for the chat session to reconnect to */
+ chatId: string;
+ } & ChatRequestOptions) => Promise<ReadableStream<UIMessageChunk> | null>;
+ }
+
+ type CreateUIMessage<UI_MESSAGE extends UIMessage> = Omit<UI_MESSAGE, 'id' | 'role'> & {
+ id?: UI_MESSAGE['id'];
+ role?: UI_MESSAGE['role'];
+ };
+ type UIDataPartSchemas = Record<string, FlexibleSchema>;
+ type UIDataTypesToSchemas<T extends UIDataTypes> = {
+ [K in keyof T]: FlexibleSchema<T[K]>;
+ };
+ type InferUIDataParts<T extends UIDataPartSchemas> = {
+ [K in keyof T]: InferSchema<T[K]>;
+ };
+ type ChatRequestOptions = {
/**
- Additional
-
-
- */
- providerOptions?: ProviderOptions;
+ Additional headers that should be to be passed to the API endpoint.
+ */
+ headers?: Record<string, string> | Headers;
/**
-
+ Additional body JSON properties that should be sent to the API endpoint.
*/
-
-
-
+ body?: object;
+ metadata?: unknown;
+ };
/**
-
- It contains the embeddings, the values, and additional information.
+ * Function that can be called to add a tool approval response to the chat.
*/
-
-
- The values that were embedded.
- */
- readonly values: Array<VALUE>;
- /**
- The embeddings. They are in the same order as the values.
- */
- readonly embeddings: Array<Embedding>;
- /**
- The embedding token usage.
- */
- readonly usage: EmbeddingModelUsage;
+ type ChatAddToolApproveResponseFunction = ({ id, approved, reason, }: {
+ id: string;
/**
-
-
-
+ * Flag indicating whether the approval was granted or denied.
+ */
+ approved: boolean;
/**
-
-
-
-
-
-
-
-
-
-
-
-
+ * Optional reason for the approval or denial.
+ */
+ reason?: string;
+ }) => void | PromiseLike<void>;
+ type ChatStatus = 'submitted' | 'streaming' | 'ready' | 'error';
+ interface ChatState<UI_MESSAGE extends UIMessage> {
+ status: ChatStatus;
+ error: Error | undefined;
+ messages: UI_MESSAGE[];
+ pushMessage: (message: UI_MESSAGE) => void;
+ popMessage: () => void;
+ replaceMessage: (index: number, message: UI_MESSAGE) => void;
+ snapshot: <T>(thing: T) => T;
}
-
+ type ChatOnErrorCallback = (error: Error) => void;
+ type ChatOnToolCallCallback<UI_MESSAGE extends UIMessage = UIMessage> = (options: {
+ toolCall: InferUIMessageToolCall<UI_MESSAGE>;
+ }) => void | PromiseLike<void>;
+ type ChatOnDataCallback<UI_MESSAGE extends UIMessage> = (dataPart: DataUIPart<InferUIMessageData<UI_MESSAGE>>) => void;
/**
-
-
-
-
-
-
- @param
- @param
-
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
- @param abortSignal - An optional abort signal that can be used to cancel the call.
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
-
- @returns A result object that contains the embeddings, the value, and additional information.
+ * Function that is called when the assistant response has finished streaming.
+ *
+ * @param message The assistant message that was streamed.
+ * @param messages The full chat history, including the assistant message.
+ *
+ * @param isAbort Indicates whether the request has been aborted.
+ * @param isDisconnect Indicates whether the request has been ended by a network error.
+ * @param isError Indicates whether the request has been ended by an error.
+ * @param finishReason The reason why the generation finished.
*/
-
+ type ChatOnFinishCallback<UI_MESSAGE extends UIMessage> = (options: {
+ message: UI_MESSAGE;
+ messages: UI_MESSAGE[];
+ isAbort: boolean;
+ isDisconnect: boolean;
+ isError: boolean;
+ finishReason?: FinishReason;
+ }) => void;
+ interface ChatInit<UI_MESSAGE extends UIMessage> {
/**
-
-
-
+ * A unique identifier for the chat. If not provided, a random one will be
+ * generated.
+ */
+ id?: string;
+ messageMetadataSchema?: FlexibleSchema<InferUIMessageMetadata<UI_MESSAGE>>;
+ dataPartSchemas?: UIDataTypesToSchemas<InferUIMessageData<UI_MESSAGE>>;
+ messages?: UI_MESSAGE[];
/**
-
+ * A way to provide a function that is going to be used for ids for messages and the chat.
+ * If not provided the default AI SDK `generateId` is used.
*/
-
+ generateId?: IdGenerator;
+ transport?: ChatTransport<UI_MESSAGE>;
/**
-
-
- @default 2
+ * Callback function to be called when an error is encountered.
*/
-
+ onError?: ChatOnErrorCallback;
/**
-
-
-
+ Optional callback function that is invoked when a tool call is received.
+ Intended for automatic client-side tool execution.
+
+ You can optionally return a result for the tool call,
+ either synchronously or asynchronously.
+ */
+ onToolCall?: ChatOnToolCallCallback<UI_MESSAGE>;
/**
-
-
-
- headers?: Record<string, string>;
+ * Function that is called when the assistant response has finished streaming.
+ */
+ onFinish?: ChatOnFinishCallback<UI_MESSAGE>;
/**
- * Optional
+ * Optional callback function that is called when a data part is received.
+ *
+ * @param data The data part that was received.
*/
-
+ onData?: ChatOnDataCallback<UI_MESSAGE>;
+ /**
+ * When provided, this function will be called when the stream is finished or a tool call is added
+ * to determine if the current messages should be resubmitted.
+ */
+ sendAutomaticallyWhen?: (options: {
+ messages: UI_MESSAGE[];
+ }) => boolean | PromiseLike<boolean>;
+ }
+ declare abstract class AbstractChat<UI_MESSAGE extends UIMessage> {
+ readonly id: string;
+ readonly generateId: IdGenerator;
+ protected state: ChatState<UI_MESSAGE>;
+ private messageMetadataSchema;
+ private dataPartSchemas;
+ private readonly transport;
+ private onError?;
+ private onToolCall?;
+ private onFinish?;
+ private onData?;
+ private sendAutomaticallyWhen?;
+ private activeResponse;
+ private jobExecutor;
+ constructor({ generateId, id, transport, messageMetadataSchema, dataPartSchemas, state, onError, onToolCall, onFinish, onData, sendAutomaticallyWhen, }: Omit<ChatInit<UI_MESSAGE>, 'messages'> & {
+ state: ChatState<UI_MESSAGE>;
+ });
+ /**
+ * Hook status:
+ *
+ * - `submitted`: The message has been sent to the API and we're awaiting the start of the response stream.
+ * - `streaming`: The response is actively streaming in from the API, receiving chunks of data.
+ * - `ready`: The full response has been received and processed; a new user message can be submitted.
+ * - `error`: An error occurred during the API request, preventing successful completion.
+ */
+ get status(): ChatStatus;
+ protected setStatus({ status, error, }: {
+ status: ChatStatus;
+ error?: Error;
+ }): void;
+ get error(): Error | undefined;
+ get messages(): UI_MESSAGE[];
+ get lastMessage(): UI_MESSAGE | undefined;
+ set messages(messages: UI_MESSAGE[]);
+ /**
+ * Appends or replaces a user message to the chat list. This triggers the API call to fetch
+ * the assistant's response.
+ *
+ * If a messageId is provided, the message will be replaced.
+ */
+ sendMessage: (message?: (CreateUIMessage<UI_MESSAGE> & {
+ text?: never;
+ files?: never;
+ messageId?: string;
+ }) | {
+ text: string;
+ files?: FileList | FileUIPart[];
+ metadata?: InferUIMessageMetadata<UI_MESSAGE>;
+ parts?: never;
+ messageId?: string;
+ } | {
+ files: FileList | FileUIPart[];
+ metadata?: InferUIMessageMetadata<UI_MESSAGE>;
+ parts?: never;
+ messageId?: string;
+ }, options?: ChatRequestOptions) => Promise<void>;
+ /**
+ * Regenerate the assistant message with the provided message id.
+ * If no message id is provided, the last assistant message will be regenerated.
+ */
+ regenerate: ({ messageId, ...options }?: {
+ messageId?: string;
+ } & ChatRequestOptions) => Promise<void>;
+ /**
+ * Attempt to resume an ongoing streaming response.
+ */
+ resumeStream: (options?: ChatRequestOptions) => Promise<void>;
+ /**
+ * Clear the error state and set the status to ready if the chat is in an error state.
+ */
+ clearError: () => void;
+ addToolApprovalResponse: ChatAddToolApproveResponseFunction;
+ addToolOutput: <TOOL extends keyof InferUIMessageTools<UI_MESSAGE>>({ state, tool, toolCallId, output, errorText, }: {
+ state?: "output-available";
+ tool: TOOL;
+ toolCallId: string;
+ output: InferUIMessageTools<UI_MESSAGE>[TOOL]["output"];
+ errorText?: never;
+ } | {
+ state: "output-error";
+ tool: TOOL;
+ toolCallId: string;
+ output?: never;
+ errorText: string;
+ }) => Promise<void>;
+ /** @deprecated Use addToolOutput */
+ addToolResult: <TOOL extends keyof InferUIMessageTools<UI_MESSAGE>>({ state, tool, toolCallId, output, errorText, }: {
+ state?: "output-available";
+ tool: TOOL;
+ toolCallId: string;
+ output: InferUIMessageTools<UI_MESSAGE>[TOOL]["output"];
+ errorText?: never;
+ } | {
+ state: "output-error";
+ tool: TOOL;
+ toolCallId: string;
+ output?: never;
+ errorText: string;
+ }) => Promise<void>;
+ /**
+ * Abort the current request immediately, keep the generated tokens if any.
+ */
+ stop: () => Promise<void>;
+ private makeRequest;
+ }
+
+ declare function convertFileListToFileUIParts(files: FileList | undefined): Promise<Array<FileUIPart>>;
+
+ /**
+ Converts an array of UI messages from useChat into an array of ModelMessages that can be used
+ with the AI functions (e.g. `streamText`, `generateText`).
+
+ @param messages - The UI messages to convert.
+ @param options.tools - The tools to use.
+ @param options.ignoreIncompleteToolCalls - Whether to ignore incomplete tool calls. Default is `false`.
+ @param options.convertDataPart - Optional function to convert data parts to text or file model message parts. Returns `undefined` if the part should be ignored.
+
+ @returns An array of ModelMessages.
+ */
+ declare function convertToModelMessages<UI_MESSAGE extends UIMessage>(messages: Array<Omit<UI_MESSAGE, 'id'>>, options?: {
+ tools?: ToolSet;
+ ignoreIncompleteToolCalls?: boolean;
+ convertDataPart?: (part: DataUIPart<InferUIMessageData<UI_MESSAGE>>) => TextPart | FilePart | undefined;
+ }): ModelMessage[];
+ /**
+ @deprecated Use `convertToModelMessages` instead.
+ */
+ declare const convertToCoreMessages: typeof convertToModelMessages;
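`convertToModelMessages` bridges UI messages and the model-message functions. A route-handler sketch (the model argument is a placeholder):

```ts
import { convertToModelMessages, streamText, type LanguageModel, type UIMessage } from 'ai';

export function chatHandler(model: LanguageModel, uiMessages: UIMessage[]): Response {
  const result = streamText({
    model,
    // Maps UI message parts (text, files, tool results) into model messages.
    messages: convertToModelMessages(uiMessages, { ignoreIncompleteToolCalls: true }),
  });
  return result.toUIMessageStreamResponse();
}
```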
+
+ type PrepareSendMessagesRequest<UI_MESSAGE extends UIMessage> = (options: {
+ id: string;
+ messages: UI_MESSAGE[];
+ requestMetadata: unknown;
+ body: Record<string, any> | undefined;
+ credentials: RequestCredentials | undefined;
+ headers: HeadersInit | undefined;
+ api: string;
+ } & {
+ trigger: 'submit-message' | 'regenerate-message';
+ messageId: string | undefined;
+ }) => {
+ body: object;
+ headers?: HeadersInit;
+ credentials?: RequestCredentials;
+ api?: string;
+ } | PromiseLike<{
+ body: object;
+ headers?: HeadersInit;
+ credentials?: RequestCredentials;
+ api?: string;
+ }>;
+ type PrepareReconnectToStreamRequest = (options: {
+ id: string;
+ requestMetadata: unknown;
+ body: Record<string, any> | undefined;
+ credentials: RequestCredentials | undefined;
+ headers: HeadersInit | undefined;
+ api: string;
+ }) => {
+ headers?: HeadersInit;
+ credentials?: RequestCredentials;
+ api?: string;
+ } | PromiseLike<{
+ headers?: HeadersInit;
+ credentials?: RequestCredentials;
+ api?: string;
+ }>;
+ /**
+ * Options for the `HttpChatTransport` class.
+ *
+ * @param UI_MESSAGE - The type of message to be used in the chat.
+ */
+ type HttpChatTransportInitOptions<UI_MESSAGE extends UIMessage> = {
+ /**
+ * The API URL to be used for the chat transport.
+ * Defaults to '/api/chat'.
+ */
+ api?: string;
+ /**
+ * The credentials mode to be used for the fetch request.
+ * Possible values are: 'omit', 'same-origin', 'include'.
+ * Defaults to 'same-origin'.
+ */
+ credentials?: Resolvable<RequestCredentials>;
+ /**
+ * HTTP headers to be sent with the API request.
+ */
+ headers?: Resolvable<Record<string, string> | Headers>;
+ /**
+ * Extra body object to be sent with the API request.
+ * @example
+ * Send a `sessionId` to the API along with the messages.
+ * ```js
+ * useChat({
+ * body: {
+ * sessionId: '123',
+ * }
+ * })
+ * ```
+ */
+ body?: Resolvable<object>;
+ /**
+ Custom fetch implementation. You can use it as a middleware to intercept requests,
+ or to provide a custom fetch implementation for e.g. testing.
+ */
+ fetch?: FetchFunction;
+ /**
+ * When a function is provided, it will be used
+ * to prepare the request body for the chat API. This can be useful for
+ * customizing the request body based on the messages and data in the chat.
+ *
+ * @param id The id of the chat.
+ * @param messages The current messages in the chat.
+ * @param requestBody The request body object passed in the chat request.
+ */
+ prepareSendMessagesRequest?: PrepareSendMessagesRequest<UI_MESSAGE>;
+ /**
+ * When a function is provided, it will be used
+ * to prepare the request body for the chat API. This can be useful for
+ * customizing the request body based on the messages and data in the chat.
+ *
+ * @param id The id of the chat.
+ * @param messages The current messages in the chat.
+ * @param requestBody The request body object passed in the chat request.
+ */
+ prepareReconnectToStreamRequest?: PrepareReconnectToStreamRequest;
+ };
+ declare abstract class HttpChatTransport<UI_MESSAGE extends UIMessage> implements ChatTransport<UI_MESSAGE> {
+ protected api: string;
+ protected credentials: HttpChatTransportInitOptions<UI_MESSAGE>['credentials'];
+ protected headers: HttpChatTransportInitOptions<UI_MESSAGE>['headers'];
+ protected body: HttpChatTransportInitOptions<UI_MESSAGE>['body'];
+ protected fetch?: FetchFunction;
+ protected prepareSendMessagesRequest?: PrepareSendMessagesRequest<UI_MESSAGE>;
+ protected prepareReconnectToStreamRequest?: PrepareReconnectToStreamRequest;
+ constructor({ api, credentials, headers, body, fetch, prepareSendMessagesRequest, prepareReconnectToStreamRequest, }: HttpChatTransportInitOptions<UI_MESSAGE>);
+ sendMessages({ abortSignal, ...options }: Parameters<ChatTransport<UI_MESSAGE>['sendMessages']>[0]): Promise<ReadableStream<UIMessageChunk>>;
+ reconnectToStream(options: Parameters<ChatTransport<UI_MESSAGE>['reconnectToStream']>[0]): Promise<ReadableStream<UIMessageChunk> | null>;
+ protected abstract processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
+ }
+
+ declare class DefaultChatTransport<UI_MESSAGE extends UIMessage> extends HttpChatTransport<UI_MESSAGE> {
+ constructor(options?: HttpChatTransportInitOptions<UI_MESSAGE>);
+ protected processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
+ }
|
|
3217
|
+
|
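The transport above is what `useChat`-style clients plug into. A minimal sketch of wiring `DefaultChatTransport` to a custom endpoint and trimming the request body via `prepareSendMessagesRequest`; the `@ai-sdk/react` import, the `/api/chat` route, and the single-message body shape are assumptions, not part of this file:

```ts
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';

// Hypothetical React client: only the last message is sent; the server
// is assumed to reload the earlier messages for the chat id.
export function useMyChat() {
  return useChat({
    transport: new DefaultChatTransport({
      api: '/api/chat',
      prepareSendMessagesRequest: ({ id, messages, body }) => ({
        body: { id, message: messages[messages.length - 1], ...body },
      }),
    }),
  });
}
```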
+/**
+Check if the last message is an assistant message with completed tool call approvals.
+The last step of the message must have at least one tool approval response and
+all tool approvals must have a response.
+*/
+declare function lastAssistantMessageIsCompleteWithApprovalResponses({ messages, }: {
+messages: UIMessage[];
+}): boolean;
+
+/**
+Check if the message is an assistant message with completed tool calls.
+The last step of the message must have at least one tool invocation and
+all tool invocations must have a result.
+*/
+declare function lastAssistantMessageIsCompleteWithToolCalls({ messages, }: {
+messages: UIMessage[];
+}): boolean;
+
+declare class TextStreamChatTransport<UI_MESSAGE extends UIMessage> extends HttpChatTransport<UI_MESSAGE> {
+constructor(options?: HttpChatTransportInitOptions<UI_MESSAGE>);
+protected processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
+}
+
+type CompletionRequestOptions = {
+/**
+An optional object of headers to be passed to the API endpoint.
+*/
+headers?: Record<string, string> | Headers;
+/**
+An optional object to be passed to the API endpoint.
+*/
+body?: object;
+};
+type UseCompletionOptions = {
+/**
+* The API endpoint that accepts a `{ prompt: string }` object and returns
+* a stream of tokens of the AI completion response. Defaults to `/api/completion`.
+*/
+api?: string;
+/**
+* An unique identifier for the chat. If not provided, a random one will be
+* generated. When provided, the `useChat` hook with the same `id` will
+* have shared states across components.
+*/
+id?: string;
+/**
+* Initial prompt input of the completion.
+*/
+initialInput?: string;
+/**
+* Initial completion result. Useful to load an existing history.
+*/
+initialCompletion?: string;
+/**
+* Callback function to be called when the completion is finished streaming.
+*/
+onFinish?: (prompt: string, completion: string) => void;
+/**
+* Callback function to be called when an error is encountered.
+*/
+onError?: (error: Error) => void;
+/**
+* The credentials mode to be used for the fetch request.
+* Possible values are: 'omit', 'same-origin', 'include'.
+* Defaults to 'same-origin'.
+*/
+credentials?: RequestCredentials;
+/**
+* HTTP headers to be sent with the API request.
+*/
+headers?: Record<string, string> | Headers;
+/**
+* Extra body object to be sent with the API request.
+* @example
+* Send a `sessionId` to the API along with the prompt.
+* ```js
+* useChat({
+* body: {
+* sessionId: '123',
+* }
+* })
+* ```
+*/
+body?: object;
+/**
+Streaming protocol that is used. Defaults to `data`.
+*/
+streamProtocol?: 'data' | 'text';
+/**
+Custom fetch implementation. You can use it as a middleware to intercept requests,
+or to provide a custom fetch implementation for e.g. testing.
+*/
+fetch?: FetchFunction;
+};
+
+type SafeValidateUIMessagesResult<UI_MESSAGE extends UIMessage> = {
+success: true;
+data: Array<UI_MESSAGE>;
+} | {
+success: false;
+error: Error;
+};
+/**
+* Validates a list of UI messages like `validateUIMessages`,
+* but instead of throwing it returns `{ success: true, data }`
+* or `{ success: false, error }`.
+*/
+declare function safeValidateUIMessages<UI_MESSAGE extends UIMessage>({ messages, metadataSchema, dataSchemas, tools, }: {
+messages: unknown;
+metadataSchema?: FlexibleSchema<UIMessage['metadata']>;
+dataSchemas?: {
+[NAME in keyof InferUIMessageData<UI_MESSAGE> & string]?: FlexibleSchema<InferUIMessageData<UI_MESSAGE>[NAME]>;
+};
+tools?: {
+[NAME in keyof InferUIMessageTools<UI_MESSAGE> & string]?: Tool<InferUIMessageTools<UI_MESSAGE>[NAME]['input'], InferUIMessageTools<UI_MESSAGE>[NAME]['output']>;
+};
+}): Promise<SafeValidateUIMessagesResult<UI_MESSAGE>>;
+/**
+* Validates a list of UI messages.
+*
+* Metadata, data parts, and generic tool call structures are only validated if
+* the corresponding schemas are provided. Otherwise, they are assumed to be
+* valid.
+*/
+declare function validateUIMessages<UI_MESSAGE extends UIMessage>({ messages, metadataSchema, dataSchemas, tools, }: {
+messages: unknown;
+metadataSchema?: FlexibleSchema<UIMessage['metadata']>;
+dataSchemas?: {
+[NAME in keyof InferUIMessageData<UI_MESSAGE> & string]?: FlexibleSchema<InferUIMessageData<UI_MESSAGE>[NAME]>;
+};
+tools?: {
+[NAME in keyof InferUIMessageTools<UI_MESSAGE> & string]?: Tool<InferUIMessageTools<UI_MESSAGE>[NAME]['input'], InferUIMessageTools<UI_MESSAGE>[NAME]['output']>;
+};
+}): Promise<Array<UI_MESSAGE>>;
+
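A sketch of using `validateUIMessages` on the server before converting messages for a model call; the route shape, the `zod` schemas, and the `weather` data-part name are assumptions:

```ts
import { convertToModelMessages, validateUIMessages } from 'ai';
import { z } from 'zod';

export async function POST(req: Request) {
  const { messages } = await req.json();

  // Throws if metadata or a `data-weather` part does not match its schema.
  const validated = await validateUIMessages({
    messages,
    metadataSchema: z.object({ createdAt: z.number() }),
    dataSchemas: {
      weather: z.object({ city: z.string(), temperature: z.number() }),
    },
  });

  const modelMessages = convertToModelMessages(validated);
  return Response.json({ messageCount: modelMessages.length });
}
```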
+interface UIMessageStreamWriter<UI_MESSAGE extends UIMessage = UIMessage> {
+/**
+* Appends a data stream part to the stream.
+*/
+write(part: InferUIMessageChunk<UI_MESSAGE>): void;
+/**
+* Merges the contents of another stream to this stream.
+*/
+merge(stream: ReadableStream<InferUIMessageChunk<UI_MESSAGE>>): void;
+/**
+* Error handler that is used by the data stream writer.
+* This is intended for forwarding when merging streams
+* to prevent duplicated error masking.
+*/
+onError: ErrorHandler | undefined;
+}
+
+declare function createUIMessageStream<UI_MESSAGE extends UIMessage>({ execute, onError, originalMessages, onFinish, generateId, }: {
+execute: (options: {
+writer: UIMessageStreamWriter<UI_MESSAGE>;
+}) => Promise<void> | void;
+onError?: (error: unknown) => string;
+/**
+* The original messages. If they are provided, persistence mode is assumed,
+* and a message ID is provided for the response message.
+*/
+originalMessages?: UI_MESSAGE[];
+onFinish?: UIMessageStreamOnFinishCallback<UI_MESSAGE>;
+generateId?: IdGenerator;
+}): ReadableStream<InferUIMessageChunk<UI_MESSAGE>>;
+
+declare function createUIMessageStreamResponse({ status, statusText, headers, stream, consumeSseStream, }: UIMessageStreamResponseInit & {
+stream: ReadableStream<UIMessageChunk>;
+}): Response;
+
+declare class JsonToSseTransformStream extends TransformStream<unknown, string> {
+constructor();
+}
+
+declare function pipeUIMessageStreamToResponse({ response, status, statusText, headers, stream, consumeSseStream, }: {
+response: ServerResponse;
+stream: ReadableStream<UIMessageChunk>;
+} & UIMessageStreamResponseInit): void;
+
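A sketch of assembling a custom UI message stream and returning it over HTTP; the `data-status` part name and its payload are illustrative:

```ts
import { createUIMessageStream, createUIMessageStreamResponse } from 'ai';

export function GET() {
  const stream = createUIMessageStream({
    execute: async ({ writer }) => {
      // Data parts use a `data-*` type; the name and payload here are made up.
      writer.write({ type: 'data-status', data: { state: 'thinking' } });
      // writer.merge(...) could forward chunks from another UI message stream.
    },
    onError: error => `Stream error: ${String(error)}`,
  });

  return createUIMessageStreamResponse({ stream });
}
```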
+/**
+* Transforms a stream of `UIMessageChunk`s into an `AsyncIterableStream` of `UIMessage`s.
+*
+* @param options.message - The last assistant message to use as a starting point when the conversation is resumed. Otherwise undefined.
+* @param options.stream - The stream of `UIMessageChunk`s to read.
+* @param options.terminateOnError - Whether to terminate the stream if an error occurs.
+* @param options.onError - A function that is called when an error occurs.
+*
+* @returns An `AsyncIterableStream` of `UIMessage`s. Each stream part is a different state of the same message
+* as it is being completed.
+*/
+declare function readUIMessageStream<UI_MESSAGE extends UIMessage>({ message, stream, onError, terminateOnError, }: {
+message?: UI_MESSAGE;
+stream: ReadableStream<UIMessageChunk>;
+onError?: (error: unknown) => void;
+terminateOnError?: boolean;
+}): AsyncIterableStream<UI_MESSAGE>;
+
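Outside of a UI framework, the chunk stream can be folded back into message snapshots with `readUIMessageStream`; each iteration yields the same message in a more complete state. A minimal sketch:

```ts
import { readUIMessageStream, type UIMessageChunk } from 'ai';

async function logMessageProgress(stream: ReadableStream<UIMessageChunk>) {
  for await (const message of readUIMessageStream({ stream })) {
    console.log(`message ${message.id} now has ${message.parts.length} parts`);
  }
}
```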
+declare const UI_MESSAGE_STREAM_HEADERS: {
+'content-type': string;
+'cache-control': string;
+connection: string;
+'x-vercel-ai-ui-message-stream': string;
+'x-accel-buffering': string;
+};
+
+/**
+* Runs the agent and stream the output as a UI message stream.
+*
+* @param agent - The agent to run.
+* @param messages - The input UI messages.
+*
+* @returns The UI message stream.
+*/
+declare function createAgentUIStream<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never, MESSAGE_METADATA = unknown>({ agent, messages, options, ...uiMessageStreamOptions }: {
+agent: Agent<CALL_OPTIONS, TOOLS, OUTPUT>;
+messages: unknown[];
+options?: CALL_OPTIONS;
+} & UIMessageStreamOptions<UIMessage<MESSAGE_METADATA, never, InferUITools<TOOLS>>>): Promise<AsyncIterableStream<InferUIMessageChunk<UIMessage<MESSAGE_METADATA, never, InferUITools<TOOLS>>>>>;
+
+/**
+* Pipes the agent UI message stream to a Node.js ServerResponse object.
+*
+* @param agent - The agent to run.
+* @param messages - The input UI messages.
+*/
+declare function pipeAgentUIStreamToResponse<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never, MESSAGE_METADATA = unknown>({ response, headers, status, statusText, consumeSseStream, ...options }: {
+response: ServerResponse;
+agent: Agent<CALL_OPTIONS, TOOLS, OUTPUT>;
+messages: unknown[];
+options?: CALL_OPTIONS;
+} & UIMessageStreamResponseInit & UIMessageStreamOptions<UIMessage<MESSAGE_METADATA, never, InferUITools<TOOLS>>>): Promise<void>;
+
+/**
+The result of an `embed` call.
+It contains the embedding, the value, and additional information.
+*/
+interface EmbedResult<VALUE> {
+/**
+The value that was embedded.
+*/
+readonly value: VALUE;
+/**
+The embedding of the value.
+*/
+readonly embedding: Embedding;
+/**
+The embedding token usage.
+*/
+readonly usage: EmbeddingModelUsage;
+/**
+Optional provider-specific metadata.
+*/
+readonly providerMetadata?: ProviderMetadata;
+/**
+Optional response data.
+*/
+readonly response?: {
+/**
+Response headers.
+*/
+headers?: Record<string, string>;
+/**
+The response body.
+*/
+body?: unknown;
+};
+}
+
+/**
+Embed a value using an embedding model. The type of the value is defined by the embedding model.
+
+@param model - The embedding model to use.
+@param value - The value that should be embedded.
+
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
+
+@returns A result object that contains the embedding, the value, and additional information.
+*/
+declare function embed<VALUE = string>({ model: modelArg, value, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
+/**
+The embedding model to use.
+*/
+model: EmbeddingModel<VALUE>;
+/**
+The value that should be embedded.
+*/
+value: VALUE;
+/**
+Maximum number of retries per embedding model call. Set to 0 to disable retries.
+
+@default 2
+*/
+maxRetries?: number;
+/**
+Abort signal.
+*/
+abortSignal?: AbortSignal;
+/**
+Additional headers to include in the request.
+Only applicable for HTTP-based providers.
+*/
+headers?: Record<string, string>;
+/**
+Additional provider-specific options. They are passed through
+to the provider from the AI SDK and enable provider-specific
+functionality that can be fully encapsulated in the provider.
+*/
+providerOptions?: ProviderOptions;
+/**
+* Optional telemetry configuration (experimental).
+*/
+experimental_telemetry?: TelemetrySettings;
+}): Promise<EmbedResult<VALUE>>;
+
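A sketch of calling `embed`; the `@ai-sdk/openai` provider import and model id are assumptions, any `EmbeddingModel` works:

```ts
import { embed } from 'ai';
import { openai } from '@ai-sdk/openai';

const { embedding, usage } = await embed({
  model: openai.textEmbeddingModel('text-embedding-3-small'),
  value: 'sunny day at the beach',
  maxRetries: 2,
});

console.log(embedding.length, usage.tokens);
```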
+/**
+The result of a `embedMany` call.
+It contains the embeddings, the values, and additional information.
+*/
+interface EmbedManyResult<VALUE> {
+/**
+The values that were embedded.
+*/
+readonly values: Array<VALUE>;
+/**
+The embeddings. They are in the same order as the values.
+*/
+readonly embeddings: Array<Embedding>;
+/**
+The embedding token usage.
+*/
+readonly usage: EmbeddingModelUsage;
+/**
+Optional provider-specific metadata.
+*/
+readonly providerMetadata?: ProviderMetadata;
+/**
+Optional raw response data.
+*/
+readonly responses?: Array<{
+/**
+Response headers.
+*/
+headers?: Record<string, string>;
+/**
+The response body.
+*/
+body?: unknown;
+} | undefined>;
+}
+
+/**
+Embed several values using an embedding model. The type of the value is defined
+by the embedding model.
+
+`embedMany` automatically splits large requests into smaller chunks if the model
+has a limit on how many embeddings can be generated in a single call.
+
+@param model - The embedding model to use.
+@param values - The values that should be embedded.
+
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
+
+@returns A result object that contains the embeddings, the value, and additional information.
+*/
+declare function embedMany<VALUE = string>({ model: modelArg, values, maxParallelCalls, maxRetries: maxRetriesArg, abortSignal, headers, providerOptions, experimental_telemetry: telemetry, }: {
+/**
+The embedding model to use.
+*/
+model: EmbeddingModel<VALUE>;
+/**
+The values that should be embedded.
+*/
+values: Array<VALUE>;
+/**
+Maximum number of retries per embedding model call. Set to 0 to disable retries.
+
+@default 2
+*/
+maxRetries?: number;
+/**
+Abort signal.
+*/
+abortSignal?: AbortSignal;
+/**
+Additional headers to include in the request.
+Only applicable for HTTP-based providers.
+*/
+headers?: Record<string, string>;
+/**
+* Optional telemetry configuration (experimental).
+*/
+experimental_telemetry?: TelemetrySettings;
 /**
 Additional provider-specific options. They are passed through
 to the provider from the AI SDK and enable provider-specific
@@ -2494,9 +3625,9 @@ declare function embedMany<VALUE = string>({ model: modelArg, values, maxParalle
 maxParallelCalls?: number;
 }): Promise<EmbedManyResult<VALUE>>;
 
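The batched variant accepts a list and splits it automatically when the provider caps embeddings per call; `cosineSimilarity`, exported elsewhere in this package, can then compare the resulting vectors. Same provider assumptions as above:

```ts
import { cosineSimilarity, embedMany } from 'ai';
import { openai } from '@ai-sdk/openai';

const { embeddings, usage } = await embedMany({
  model: openai.textEmbeddingModel('text-embedding-3-small'),
  values: ['sunny day at the beach', 'rainy afternoon in the city'],
  maxParallelCalls: 2,
});

console.log(cosineSimilarity(embeddings[0], embeddings[1]), usage.tokens);
```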
-declare const symbol$
+declare const symbol$b: unique symbol;
 declare class InvalidArgumentError extends AISDKError {
-private readonly [symbol$
+private readonly [symbol$b];
 readonly parameter: string;
 readonly value: unknown;
 constructor({ parameter, value, message, }: {
@@ -2538,6 +3669,8 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
 id: string;
 toolName: string;
 providerMetadata?: ProviderMetadata;
+dynamic?: boolean;
+title?: string;
 } | {
 type: 'tool-input-delta';
 id: string;
@@ -2547,7 +3680,7 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
 type: 'tool-input-end';
 id: string;
 providerMetadata?: ProviderMetadata;
-} | ({
+} | ToolApprovalRequestOutput<TOOLS> | ({
 type: 'source';
 } & Source) | {
 type: 'file';
@@ -2563,7 +3696,7 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
 file: GeneratedFile;
 } | {
 type: 'stream-start';
-warnings:
+warnings: LanguageModelV3CallWarning[];
 } | {
 type: 'response-metadata';
 id?: string;
@@ -2582,9 +3715,9 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
 rawValue: unknown;
 };
 
-declare const symbol$
+declare const symbol$a: unique symbol;
 declare class InvalidStreamPartError extends AISDKError {
-private readonly [symbol$
+private readonly [symbol$a];
 readonly chunk: SingleRequestTextStreamPart<any>;
 constructor({ chunk, message, }: {
 chunk: SingleRequestTextStreamPart<any>;
@@ -2593,21 +3726,7 @@ declare class InvalidStreamPartError extends AISDKError {
 static isInstance(error: unknown): error is InvalidStreamPartError;
 }
 
-declare const symbol$
-/**
-* An error occurred with the MCP client.
-*/
-declare class MCPClientError extends AISDKError {
-private readonly [symbol$b];
-constructor({ name, message, cause, }: {
-name?: string;
-message: string;
-cause?: unknown;
-});
-static isInstance(error: unknown): error is MCPClientError;
-}
-
-declare const symbol$a: unique symbol;
+declare const symbol$9: unique symbol;
 /**
 Thrown when no image could be generated. This can have multiple causes:
 
@@ -2615,7 +3734,7 @@ Thrown when no image could be generated. This can have multiple causes:
 - The model generated a response that could not be parsed.
 */
 declare class NoImageGeneratedError extends AISDKError {
-private readonly [symbol$
+private readonly [symbol$9];
 /**
 The response metadata for each call.
 */
@@ -2628,7 +3747,7 @@ declare class NoImageGeneratedError extends AISDKError {
 static isInstance(error: unknown): error is NoImageGeneratedError;
 }
 
-declare const symbol$
+declare const symbol$8: unique symbol;
 /**
 Thrown when no object could be generated. This can have several causes:
 
@@ -2641,7 +3760,7 @@ The error contains the following properties:
 - `text`: The text that was generated by the model. This can be the raw text or the tool call text, depending on the model.
 */
 declare class NoObjectGeneratedError extends AISDKError {
-private readonly [symbol$
+private readonly [symbol$8];
 /**
 The text that was generated by the model. This can be the raw text or the tool call text, depending on the model.
 */
@@ -2669,12 +3788,12 @@ declare class NoObjectGeneratedError extends AISDKError {
 static isInstance(error: unknown): error is NoObjectGeneratedError;
 }
 
-declare const symbol$
+declare const symbol$7: unique symbol;
 /**
 Thrown when no LLM output was generated, e.g. because of errors.
 */
 declare class NoOutputGeneratedError extends AISDKError {
-private readonly [symbol$
+private readonly [symbol$7];
 constructor({ message, cause, }?: {
 message?: string;
 cause?: Error;
@@ -2682,16 +3801,14 @@ declare class NoOutputGeneratedError extends AISDKError {
 static isInstance(error: unknown): error is NoOutputGeneratedError;
 }
 
-declare const symbol$7: unique symbol;
 /**
-
+Error that is thrown when no speech audio was generated.
 */
-declare class
-
-constructor(
-
+declare class NoSpeechGeneratedError extends AISDKError {
+readonly responses: Array<SpeechModelResponseMetadata>;
+constructor(options: {
+responses: Array<SpeechModelResponseMetadata>;
 });
-static isInstance(error: unknown): error is NoOutputSpecifiedError;
 }
 
 declare const symbol$6: unique symbol;
@@ -2811,6 +3928,10 @@ interface GenerateImageResult {
 * results that can be fully encapsulated in the provider.
 */
 readonly providerMetadata: ImageModelProviderMetadata;
+/**
+Combined token usage across all underlying provider calls for this image generation.
+*/
+readonly usage: ImageModelUsage;
 }
 
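The new `usage` field on `GenerateImageResult` is returned next to the generated images. A sketch; the provider, model id, and the `experimental_generateImage` export name are assumptions that may differ in this version:

```ts
import { experimental_generateImage as generateImage } from 'ai';
import { openai } from '@ai-sdk/openai';

const { images, usage } = await generateImage({
  model: openai.image('gpt-image-1'),
  prompt: 'a watercolor lighthouse at dusk',
  n: 1,
});

// `usage` aggregates token usage across all underlying provider calls.
console.log(images.length, usage);
```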
 /**
@@ -2834,7 +3955,7 @@ declare function generateImage({ model, prompt, n, maxImagesPerCall, size, aspec
 /**
 The image model to use.
 */
-model:
+model: ImageModelV3;
 /**
 The prompt that should be used to generate the image.
 */
@@ -3017,7 +4138,7 @@ functionality that can be fully encapsulated in the provider.
 @returns
 A result object that contains the generated object, the finish reason, the token usage, and additional information.
 */
-declare function generateObject<SCHEMA extends
+declare function generateObject<SCHEMA extends FlexibleSchema<unknown> = FlexibleSchema<JSONValue$1>, OUTPUT extends 'object' | 'array' | 'enum' | 'no-schema' = InferSchema<SCHEMA> extends string ? 'enum' : 'object', RESULT = OUTPUT extends 'array' ? Array<InferSchema<SCHEMA>> : InferSchema<SCHEMA>>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (OUTPUT extends 'enum' ? {
 /**
 The enum values that the model should use.
 */
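A sketch of the schema-driven call accepted by the widened `generateObject` signature above; the model and the `zod` schema are assumptions:

```ts
import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

const { object } = await generateObject({
  model: openai('gpt-4o-mini'),
  schema: z.object({
    recipe: z.object({ name: z.string(), steps: z.array(z.string()) }),
  }),
  prompt: 'Generate a simple pancake recipe.',
});

console.log(object.recipe.name);
```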
@@ -3361,7 +4482,7 @@ functionality that can be fully encapsulated in the provider.
 @returns
 A result object for accessing the partial object stream and additional information.
 */
-declare function streamObject<SCHEMA extends
+declare function streamObject<SCHEMA extends FlexibleSchema<unknown> = FlexibleSchema<JSONValue$1>, OUTPUT extends 'object' | 'array' | 'enum' | 'no-schema' = InferSchema<SCHEMA> extends string ? 'enum' : 'object', RESULT = OUTPUT extends 'array' ? Array<InferSchema<SCHEMA>> : InferSchema<SCHEMA>>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (OUTPUT extends 'enum' ? {
 /**
 The enum values that the model should use.
 */
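The streaming counterpart exposes a partial object stream; a sketch under the same assumptions:

```ts
import { streamObject } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

const { partialObjectStream } = streamObject({
  model: openai('gpt-4o-mini'),
  schema: z.object({ headline: z.string(), bullets: z.array(z.string()) }),
  prompt: 'Summarize the benefits of unit testing.',
});

for await (const partial of partialObjectStream) {
  console.log(partial); // progressively filled object
}
```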
|
@@ -3476,7 +4597,7 @@ interface SpeechResult {
|
|
|
3476
4597
|
/**
|
|
3477
4598
|
Provider metadata from the provider.
|
|
3478
4599
|
*/
|
|
3479
|
-
readonly providerMetadata: Record<string,
|
|
4600
|
+
readonly providerMetadata: Record<string, JSONObject>;
|
|
3480
4601
|
}
|
|
3481
4602
|
|
|
3482
4603
|
/**
|
|
@@ -3500,7 +4621,7 @@ declare function generateSpeech({ model, text, voice, outputFormat, instructions
|
|
|
3500
4621
|
/**
|
|
3501
4622
|
The speech model to use.
|
|
3502
4623
|
*/
|
|
3503
|
-
model:
|
|
4624
|
+
model: SpeechModel;
|
|
3504
4625
|
/**
|
|
3505
4626
|
The text to convert to speech.
|
|
3506
4627
|
*/
|
|
@@ -3534,452 +4655,13 @@ declare function generateSpeech({ model, text, voice, outputFormat, instructions
|
|
|
3534
4655
|
record is keyed by the provider-specific metadata key.
|
|
3535
4656
|
```ts
|
|
3536
4657
|
{
|
|
3537
|
-
"openai": {}
|
|
3538
|
-
}
|
|
3539
|
-
```
|
|
3540
|
-
*/
|
|
3541
|
-
providerOptions?: ProviderOptions;
|
|
3542
|
-
/**
|
|
3543
|
-
Maximum number of retries per speech model call. Set to 0 to disable retries.
|
|
3544
|
-
|
|
3545
|
-
@default 2
|
|
3546
|
-
*/
|
|
3547
|
-
maxRetries?: number;
|
|
3548
|
-
/**
|
|
3549
|
-
Abort signal.
|
|
3550
|
-
*/
|
|
3551
|
-
abortSignal?: AbortSignal;
|
|
3552
|
-
/**
|
|
3553
|
-
Additional headers to include in the request.
|
|
3554
|
-
Only applicable for HTTP-based providers.
|
|
3555
|
-
*/
|
|
3556
|
-
headers?: Record<string, string>;
|
|
3557
|
-
}): Promise<SpeechResult>;
|
|
3558
|
-
|
|
3559
|
-
type Warning = LanguageModelV2CallWarning | ImageModelV2CallWarning | SpeechModelV2CallWarning | TranscriptionModelV2CallWarning;
|
|
3560
|
-
type LogWarningsFunction = (warnings: Warning[]) => void;
|
|
3561
|
-
|
|
3562
|
-
/**
|
|
3563
|
-
* Applies default settings for a language model.
|
|
3564
|
-
*/
|
|
3565
|
-
declare function defaultSettingsMiddleware({ settings, }: {
|
|
3566
|
-
settings: Partial<{
|
|
3567
|
-
maxOutputTokens?: LanguageModelV2CallOptions['maxOutputTokens'];
|
|
3568
|
-
temperature?: LanguageModelV2CallOptions['temperature'];
|
|
3569
|
-
stopSequences?: LanguageModelV2CallOptions['stopSequences'];
|
|
3570
|
-
topP?: LanguageModelV2CallOptions['topP'];
|
|
3571
|
-
topK?: LanguageModelV2CallOptions['topK'];
|
|
3572
|
-
presencePenalty?: LanguageModelV2CallOptions['presencePenalty'];
|
|
3573
|
-
frequencyPenalty?: LanguageModelV2CallOptions['frequencyPenalty'];
|
|
3574
|
-
responseFormat?: LanguageModelV2CallOptions['responseFormat'];
|
|
3575
|
-
seed?: LanguageModelV2CallOptions['seed'];
|
|
3576
|
-
tools?: LanguageModelV2CallOptions['tools'];
|
|
3577
|
-
toolChoice?: LanguageModelV2CallOptions['toolChoice'];
|
|
3578
|
-
headers?: LanguageModelV2CallOptions['headers'];
|
|
3579
|
-
providerOptions?: LanguageModelV2CallOptions['providerOptions'];
|
|
3580
|
-
}>;
|
|
3581
|
-
}): LanguageModelMiddleware;
|
|
3582
|
-
|
|
3583
|
-
/**
|
|
3584
|
-
* Extract an XML-tagged reasoning section from the generated text and exposes it
|
|
3585
|
-
* as a `reasoning` property on the result.
|
|
3586
|
-
*
|
|
3587
|
-
* @param tagName - The name of the XML tag to extract reasoning from.
|
|
3588
|
-
* @param separator - The separator to use between reasoning and text sections.
|
|
3589
|
-
* @param startWithReasoning - Whether to start with reasoning tokens.
|
|
3590
|
-
*/
|
|
3591
|
-
declare function extractReasoningMiddleware({ tagName, separator, startWithReasoning, }: {
|
|
3592
|
-
tagName: string;
|
|
3593
|
-
separator?: string;
|
|
3594
|
-
startWithReasoning?: boolean;
|
|
3595
|
-
}): LanguageModelMiddleware;
|
|
3596
|
-
|
|
3597
|
-
/**
|
|
3598
|
-
* Simulates streaming chunks with the response from a generate call.
|
|
3599
|
-
*/
|
|
3600
|
-
declare function simulateStreamingMiddleware(): LanguageModelMiddleware;
|
|
3601
|
-
|
|
3602
|
-
/**
|
|
3603
|
-
* Wraps a LanguageModelV2 instance with middleware functionality.
|
|
3604
|
-
* This function allows you to apply middleware to transform parameters,
|
|
3605
|
-
* wrap generate operations, and wrap stream operations of a language model.
|
|
3606
|
-
*
|
|
3607
|
-
* @param options - Configuration options for wrapping the language model.
|
|
3608
|
-
* @param options.model - The original LanguageModelV2 instance to be wrapped.
|
|
3609
|
-
* @param options.middleware - The middleware to be applied to the language model. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.
|
|
3610
|
-
* @param options.modelId - Optional custom model ID to override the original model's ID.
|
|
3611
|
-
* @param options.providerId - Optional custom provider ID to override the original model's provider ID.
|
|
3612
|
-
* @returns A new LanguageModelV2 instance with middleware applied.
|
|
3613
|
-
*/
|
|
3614
|
-
declare const wrapLanguageModel: ({ model, middleware: middlewareArg, modelId, providerId, }: {
|
|
3615
|
-
model: LanguageModelV2;
|
|
3616
|
-
middleware: LanguageModelMiddleware | LanguageModelMiddleware[];
|
|
3617
|
-
modelId?: string;
|
|
3618
|
-
providerId?: string;
|
|
3619
|
-
}) => LanguageModelV2;
|
|
3620
|
-
|
|
3621
|
-
/**
|
|
3622
|
-
* Wraps a ProviderV2 instance with middleware functionality.
|
|
3623
|
-
* This function allows you to apply middleware to all language models
|
|
3624
|
-
* from the provider, enabling you to transform parameters, wrap generate
|
|
3625
|
-
* operations, and wrap stream operations for every language model.
|
|
3626
|
-
*
|
|
3627
|
-
* @param options - Configuration options for wrapping the provider.
|
|
3628
|
-
* @param options.provider - The original ProviderV2 instance to be wrapped.
|
|
3629
|
-
* @param options.languageModelMiddleware - The middleware to be applied to all language models from the provider. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.
|
|
3630
|
-
* @returns A new ProviderV2 instance with middleware applied to all language models.
|
|
3631
|
-
*/
|
|
3632
|
-
declare function wrapProvider({ provider, languageModelMiddleware, }: {
|
|
3633
|
-
provider: ProviderV2;
|
|
3634
|
-
languageModelMiddleware: LanguageModelMiddleware | LanguageModelMiddleware[];
|
|
3635
|
-
}): ProviderV2;
|
|
3636
|
-
|
|
3637
|
-
/**
|
|
3638
|
-
* Creates a custom provider with specified language models, text embedding models, image models, transcription models, speech models, and an optional fallback provider.
|
|
3639
|
-
*
|
|
3640
|
-
* @param {Object} options - The options for creating the custom provider.
|
|
3641
|
-
* @param {Record<string, LanguageModel>} [options.languageModels] - A record of language models, where keys are model IDs and values are LanguageModel instances.
|
|
3642
|
-
* @param {Record<string, EmbeddingModel<string>>} [options.textEmbeddingModels] - A record of text embedding models, where keys are model IDs and values are EmbeddingModel<string> instances.
|
|
3643
|
-
* @param {Record<string, ImageModel>} [options.imageModels] - A record of image models, where keys are model IDs and values are ImageModel instances.
|
|
3644
|
-
* @param {Record<string, TranscriptionModel>} [options.transcriptionModels] - A record of transcription models, where keys are model IDs and values are TranscriptionModel instances.
|
|
3645
|
-
* @param {Record<string, SpeechModel>} [options.speechModels] - A record of speech models, where keys are model IDs and values are SpeechModel instances.
|
|
3646
|
-
* @param {Provider} [options.fallbackProvider] - An optional fallback provider to use when a requested model is not found in the custom provider.
|
|
3647
|
-
* @returns {Provider} A Provider object with languageModel, textEmbeddingModel, imageModel, transcriptionModel, and speechModel methods.
|
|
3648
|
-
*
|
|
3649
|
-
* @throws {NoSuchModelError} Throws when a requested model is not found and no fallback provider is available.
|
|
3650
|
-
*/
|
|
3651
|
-
declare function customProvider<LANGUAGE_MODELS extends Record<string, LanguageModelV2>, EMBEDDING_MODELS extends Record<string, EmbeddingModelV3<string>>, IMAGE_MODELS extends Record<string, ImageModelV2>, TRANSCRIPTION_MODELS extends Record<string, TranscriptionModelV2>, SPEECH_MODELS extends Record<string, SpeechModelV2>>({ languageModels, textEmbeddingModels, imageModels, transcriptionModels, speechModels, fallbackProvider, }: {
|
|
3652
|
-
languageModels?: LANGUAGE_MODELS;
|
|
3653
|
-
textEmbeddingModels?: EMBEDDING_MODELS;
|
|
3654
|
-
imageModels?: IMAGE_MODELS;
|
|
3655
|
-
transcriptionModels?: TRANSCRIPTION_MODELS;
|
|
3656
|
-
speechModels?: SPEECH_MODELS;
|
|
3657
|
-
fallbackProvider?: ProviderV2;
|
|
3658
|
-
}): ProviderV2 & {
|
|
3659
|
-
languageModel(modelId: ExtractModelId<LANGUAGE_MODELS>): LanguageModelV2;
|
|
3660
|
-
textEmbeddingModel(modelId: ExtractModelId<EMBEDDING_MODELS>): EmbeddingModelV3<string>;
|
|
3661
|
-
imageModel(modelId: ExtractModelId<IMAGE_MODELS>): ImageModelV2;
|
|
3662
|
-
transcriptionModel(modelId: ExtractModelId<TRANSCRIPTION_MODELS>): TranscriptionModelV2;
|
|
3663
|
-
speechModel(modelId: ExtractModelId<SPEECH_MODELS>): SpeechModelV2;
|
|
3664
|
-
};
|
|
3665
|
-
/**
|
|
3666
|
-
* @deprecated Use `customProvider` instead.
|
|
3667
|
-
*/
|
|
3668
|
-
declare const experimental_customProvider: typeof customProvider;
|
|
3669
|
-
type ExtractModelId<MODELS extends Record<string, unknown>> = Extract<keyof MODELS, string>;
|
|
3670
|
-
|
|
3671
|
-
declare const symbol: unique symbol;
|
|
3672
|
-
declare class NoSuchProviderError extends NoSuchModelError {
|
|
3673
|
-
private readonly [symbol];
|
|
3674
|
-
readonly providerId: string;
|
|
3675
|
-
readonly availableProviders: string[];
|
|
3676
|
-
constructor({ modelId, modelType, providerId, availableProviders, message, }: {
|
|
3677
|
-
modelId: string;
|
|
3678
|
-
modelType: 'languageModel' | 'textEmbeddingModel' | 'imageModel' | 'transcriptionModel' | 'speechModel';
|
|
3679
|
-
providerId: string;
|
|
3680
|
-
availableProviders: string[];
|
|
3681
|
-
message?: string;
|
|
3682
|
-
});
|
|
3683
|
-
static isInstance(error: unknown): error is NoSuchProviderError;
|
|
3684
|
-
}
|
|
3685
|
-
|
|
3686
|
-
type ExtractLiteralUnion<T> = T extends string ? string extends T ? never : T : never;
|
|
3687
|
-
interface ProviderRegistryProvider<PROVIDERS extends Record<string, ProviderV2> = Record<string, ProviderV2>, SEPARATOR extends string = ':'> {
|
|
3688
|
-
languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['languageModel']>>[0]>}` : never): LanguageModelV2;
|
|
3689
|
-
languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): LanguageModelV2;
|
|
3690
|
-
textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['textEmbeddingModel']>>[0]>}` : never): EmbeddingModelV3<string>;
|
|
3691
|
-
textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): EmbeddingModelV3<string>;
|
|
3692
|
-
imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['imageModel']>>[0]>}` : never): ImageModelV2;
|
|
3693
|
-
imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): ImageModelV2;
|
|
3694
|
-
transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['transcriptionModel']>>[0]>}` : never): TranscriptionModelV2;
|
|
3695
|
-
transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): TranscriptionModelV2;
|
|
3696
|
-
speechModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['speechModel']>>[0]>}` : never): SpeechModelV2;
|
|
3697
|
-
speechModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): SpeechModelV2;
|
|
3698
|
-
}
|
|
3699
|
-
/**
|
|
3700
|
-
* Creates a registry for the given providers with optional middleware functionality.
|
|
3701
|
-
* This function allows you to register multiple providers and optionally apply middleware
|
|
3702
|
-
* to all language models from the registry, enabling you to transform parameters, wrap generate
|
|
3703
|
-
* operations, and wrap stream operations for every language model accessed through the registry.
|
|
3704
|
-
*
|
|
3705
|
-
* @param providers - A record of provider instances to be registered in the registry.
|
|
3706
|
-
* @param options - Configuration options for the provider registry.
|
|
3707
|
-
* @param options.separator - The separator used between provider ID and model ID in the combined identifier. Defaults to ':'.
|
|
3708
|
-
* @param options.languageModelMiddleware - Optional middleware to be applied to all language models from the registry. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.
|
|
3709
|
-
* @returns A new ProviderRegistryProvider instance that provides access to all registered providers with optional middleware applied to language models.
|
|
3710
|
-
*/
|
|
3711
|
-
declare function createProviderRegistry<PROVIDERS extends Record<string, ProviderV2>, SEPARATOR extends string = ':'>(providers: PROVIDERS, { separator, languageModelMiddleware, }?: {
|
|
3712
|
-
separator?: SEPARATOR;
|
|
3713
|
-
languageModelMiddleware?: LanguageModelMiddleware | LanguageModelMiddleware[];
|
|
3714
|
-
}): ProviderRegistryProvider<PROVIDERS, SEPARATOR>;
|
|
3715
|
-
/**
|
|
3716
|
-
* @deprecated Use `createProviderRegistry` instead.
|
|
3717
|
-
*/
|
|
3718
|
-
declare const experimental_createProviderRegistry: typeof createProviderRegistry;
|
|
3719
|
-
|
|
3720
|
-
declare function createTextStreamResponse({ status, statusText, headers, textStream, }: ResponseInit & {
|
|
3721
|
-
textStream: ReadableStream<string>;
|
|
3722
|
-
}): Response;
|
|
3723
|
-
|
|
3724
|
-
declare function pipeTextStreamToResponse({ response, status, statusText, headers, textStream, }: {
|
|
3725
|
-
response: ServerResponse;
|
|
3726
|
-
textStream: ReadableStream<string>;
|
|
3727
|
-
} & ResponseInit): void;
|
|
3728
|
-
|
|
3729
|
-
declare const JSONRPCRequestSchema: z.ZodObject<{
|
|
3730
|
-
jsonrpc: z.ZodLiteral<"2.0">;
|
|
3731
|
-
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
|
3732
|
-
method: z.ZodString;
|
|
3733
|
-
params: z.ZodOptional<z.ZodObject<{
|
|
3734
|
-
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
|
3735
|
-
}, z.core.$loose>>;
|
|
3736
|
-
}, z.core.$strict>;
|
|
3737
|
-
type JSONRPCRequest = z.infer<typeof JSONRPCRequestSchema>;
|
|
3738
|
-
declare const JSONRPCResponseSchema: z.ZodObject<{
|
|
3739
|
-
jsonrpc: z.ZodLiteral<"2.0">;
|
|
3740
|
-
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
|
3741
|
-
result: z.ZodObject<{
|
|
3742
|
-
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
|
3743
|
-
}, z.core.$loose>;
|
|
3744
|
-
}, z.core.$strict>;
|
|
3745
|
-
type JSONRPCResponse = z.infer<typeof JSONRPCResponseSchema>;
|
|
3746
|
-
declare const JSONRPCErrorSchema: z.ZodObject<{
|
|
3747
|
-
jsonrpc: z.ZodLiteral<"2.0">;
|
|
3748
|
-
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
|
3749
|
-
error: z.ZodObject<{
|
|
3750
|
-
code: z.ZodNumber;
|
|
3751
|
-
message: z.ZodString;
|
|
3752
|
-
data: z.ZodOptional<z.ZodUnknown>;
|
|
3753
|
-
}, z.core.$strip>;
|
|
3754
|
-
}, z.core.$strict>;
|
|
3755
|
-
type JSONRPCError = z.infer<typeof JSONRPCErrorSchema>;
|
|
3756
|
-
declare const JSONRPCNotificationSchema: z.ZodObject<{
|
|
3757
|
-
jsonrpc: z.ZodLiteral<"2.0">;
|
|
3758
|
-
method: z.ZodString;
|
|
3759
|
-
params: z.ZodOptional<z.ZodObject<{
|
|
3760
|
-
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
|
3761
|
-
}, z.core.$loose>>;
|
|
3762
|
-
}, z.core.$strict>;
|
|
3763
|
-
type JSONRPCNotification = z.infer<typeof JSONRPCNotificationSchema>;
|
|
3764
|
-
declare const JSONRPCMessageSchema: z.ZodUnion<readonly [z.ZodObject<{
|
|
3765
|
-
jsonrpc: z.ZodLiteral<"2.0">;
|
|
3766
|
-
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
|
3767
|
-
method: z.ZodString;
|
|
3768
|
-
params: z.ZodOptional<z.ZodObject<{
|
|
3769
|
-
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
|
3770
|
-
}, z.core.$loose>>;
|
|
3771
|
-
}, z.core.$strict>, z.ZodObject<{
|
|
3772
|
-
jsonrpc: z.ZodLiteral<"2.0">;
|
|
3773
|
-
method: z.ZodString;
|
|
3774
|
-
params: z.ZodOptional<z.ZodObject<{
|
|
3775
|
-
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
|
3776
|
-
}, z.core.$loose>>;
|
|
3777
|
-
}, z.core.$strict>, z.ZodObject<{
|
|
3778
|
-
jsonrpc: z.ZodLiteral<"2.0">;
|
|
3779
|
-
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
|
3780
|
-
result: z.ZodObject<{
|
|
3781
|
-
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
|
3782
|
-
}, z.core.$loose>;
|
|
3783
|
-
}, z.core.$strict>, z.ZodObject<{
|
|
3784
|
-
jsonrpc: z.ZodLiteral<"2.0">;
|
|
3785
|
-
id: z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>;
|
|
3786
|
-
error: z.ZodObject<{
|
|
3787
|
-
code: z.ZodNumber;
|
|
3788
|
-
message: z.ZodString;
|
|
3789
|
-
data: z.ZodOptional<z.ZodUnknown>;
|
|
3790
|
-
}, z.core.$strip>;
|
|
3791
|
-
}, z.core.$strict>]>;
|
|
3792
|
-
type JSONRPCMessage = z.infer<typeof JSONRPCMessageSchema>;
|
|
3793
|
-
|
|
3794
|
-
/**
|
|
3795
|
-
* Transport interface for MCP (Model Context Protocol) communication.
|
|
3796
|
-
* Maps to the `Transport` interface in the MCP spec.
|
|
3797
|
-
*/
|
|
3798
|
-
interface MCPTransport {
|
|
3799
|
-
/**
|
|
3800
|
-
* Initialize and start the transport
|
|
3801
|
-
*/
|
|
3802
|
-
start(): Promise<void>;
|
|
3803
|
-
/**
|
|
3804
|
-
* Send a JSON-RPC message through the transport
|
|
3805
|
-
* @param message The JSON-RPC message to send
|
|
3806
|
-
*/
|
|
3807
|
-
send(message: JSONRPCMessage): Promise<void>;
|
|
3808
|
-
/**
|
|
3809
|
-
* Clean up and close the transport
|
|
3810
|
-
*/
|
|
3811
|
-
close(): Promise<void>;
|
|
3812
|
-
/**
|
|
3813
|
-
* Event handler for transport closure
|
|
3814
|
-
*/
|
|
3815
|
-
onclose?: () => void;
|
|
3816
|
-
/**
|
|
3817
|
-
* Event handler for transport errors
|
|
3818
|
-
*/
|
|
3819
|
-
onerror?: (error: Error) => void;
|
|
3820
|
-
/**
|
|
3821
|
-
* Event handler for received messages
|
|
3822
|
-
*/
|
|
3823
|
-
onmessage?: (message: JSONRPCMessage) => void;
|
|
3824
|
-
}
|
|
3825
|
-
type MCPTransportConfig = {
|
|
3826
|
-
type: 'sse';
|
|
3827
|
-
/**
|
|
3828
|
-
* The URL of the MCP server.
|
|
3829
|
-
*/
|
|
3830
|
-
url: string;
|
|
3831
|
-
/**
|
|
3832
|
-
* Additional HTTP headers to be sent with requests.
|
|
3833
|
-
*/
|
|
3834
|
-
headers?: Record<string, string>;
|
|
3835
|
-
};
|
|
3836
|
-
|
|
3837
|
-
type ToolSchemas = Record<string, {
|
|
3838
|
-
inputSchema: FlexibleSchema<JSONObject | unknown>;
|
|
3839
|
-
}> | 'automatic' | undefined;
|
|
3840
|
-
type McpToolSet<TOOL_SCHEMAS extends ToolSchemas = 'automatic'> = TOOL_SCHEMAS extends Record<string, {
|
|
3841
|
-
inputSchema: FlexibleSchema<any>;
|
|
3842
|
-
}> ? {
|
|
3843
|
-
[K in keyof TOOL_SCHEMAS]: TOOL_SCHEMAS[K] extends {
|
|
3844
|
-
inputSchema: FlexibleSchema<infer INPUT>;
|
|
3845
|
-
} ? Tool<INPUT, CallToolResult> & Required<Pick<Tool<INPUT, CallToolResult>, 'execute'>> : never;
|
|
3846
|
-
} : McpToolSet<Record<string, {
|
|
3847
|
-
inputSchema: FlexibleSchema<unknown>;
|
|
3848
|
-
}>>;
|
|
3849
|
-
declare const CallToolResultSchema: z.ZodUnion<[z.ZodObject<{
|
|
3850
|
-
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
|
3851
|
-
content: z.ZodArray<z.ZodUnion<readonly [z.ZodObject<{
|
|
3852
|
-
type: z.ZodLiteral<"text">;
|
|
3853
|
-
text: z.ZodString;
|
|
3854
|
-
}, z.core.$loose>, z.ZodObject<{
|
|
3855
|
-
type: z.ZodLiteral<"image">;
|
|
3856
|
-
data: z.ZodBase64;
|
|
3857
|
-
mimeType: z.ZodString;
|
|
3858
|
-
}, z.core.$loose>, z.ZodObject<{
|
|
3859
|
-
type: z.ZodLiteral<"resource">;
|
|
3860
|
-
resource: z.ZodUnion<readonly [z.ZodObject<{
|
|
3861
|
-
uri: z.ZodString;
|
|
3862
|
-
mimeType: z.ZodOptional<z.ZodString>;
|
|
3863
|
-
text: z.ZodString;
|
|
3864
|
-
}, z.core.$loose>, z.ZodObject<{
|
|
3865
|
-
uri: z.ZodString;
|
|
3866
|
-
mimeType: z.ZodOptional<z.ZodString>;
|
|
3867
|
-
blob: z.ZodBase64;
|
|
3868
|
-
}, z.core.$loose>]>;
|
|
3869
|
-
}, z.core.$loose>]>>;
|
|
3870
|
-
isError: z.ZodOptional<z.ZodDefault<z.ZodBoolean>>;
|
|
3871
|
-
}, z.core.$loose>, z.ZodObject<{
|
|
3872
|
-
_meta: z.ZodOptional<z.ZodObject<{}, z.core.$loose>>;
|
|
3873
|
-
toolResult: z.ZodUnknown;
|
|
3874
|
-
}, z.core.$loose>]>;
|
|
3875
|
-
type CallToolResult = z.infer<typeof CallToolResultSchema>;
|
|
3876
|
-
|
|
3877
|
-
interface MCPClientConfig {
|
|
3878
|
-
/** Transport configuration for connecting to the MCP server */
|
|
3879
|
-
transport: MCPTransportConfig | MCPTransport;
|
|
3880
|
-
/** Optional callback for uncaught errors */
|
|
3881
|
-
onUncaughtError?: (error: unknown) => void;
|
|
3882
|
-
/** Optional client name, defaults to 'ai-sdk-mcp-client' */
|
|
3883
|
-
name?: string;
|
|
3884
|
-
}
|
|
3885
|
-
declare function createMCPClient(config: MCPClientConfig): Promise<MCPClient>;
|
|
3886
|
-
interface MCPClient {
|
|
3887
|
-
tools<TOOL_SCHEMAS extends ToolSchemas = 'automatic'>(options?: {
|
|
3888
|
-
schemas?: TOOL_SCHEMAS;
|
|
3889
|
-
}): Promise<McpToolSet<TOOL_SCHEMAS>>;
|
|
3890
|
-
close: () => Promise<void>;
|
|
3891
|
-
}
|
|
3892
|
-
|
|
3893
|
-
/**
|
|
3894
|
-
The result of a `transcribe` call.
|
|
3895
|
-
It contains the transcript and additional information.
|
|
3896
|
-
*/
|
|
3897
|
-
interface TranscriptionResult {
|
|
3898
|
-
/**
|
|
3899
|
-
* The complete transcribed text from the audio.
|
|
3900
|
-
*/
|
|
3901
|
-
readonly text: string;
|
|
3902
|
-
/**
|
|
3903
|
-
* Array of transcript segments with timing information.
|
|
3904
|
-
* Each segment represents a portion of the transcribed text with start and end times.
|
|
3905
|
-
*/
|
|
3906
|
-
readonly segments: Array<{
|
|
3907
|
-
/**
|
|
3908
|
-
* The text content of this segment.
|
|
3909
|
-
*/
|
|
3910
|
-
readonly text: string;
|
|
3911
|
-
/**
|
|
3912
|
-
* The start time of this segment in seconds.
|
|
3913
|
-
*/
|
|
3914
|
-
readonly startSecond: number;
|
|
3915
|
-
/**
|
|
3916
|
-
* The end time of this segment in seconds.
|
|
3917
|
-
*/
|
|
3918
|
-
readonly endSecond: number;
|
|
3919
|
-
}>;
|
|
3920
|
-
/**
|
|
3921
|
-
* The detected language of the audio content, as an ISO-639-1 code (e.g., 'en' for English).
|
|
3922
|
-
* May be undefined if the language couldn't be detected.
|
|
3923
|
-
*/
|
|
3924
|
-
readonly language: string | undefined;
|
|
3925
|
-
/**
|
|
3926
|
-
* The total duration of the audio file in seconds.
|
|
3927
|
-
* May be undefined if the duration couldn't be determined.
|
|
3928
|
-
*/
|
|
3929
|
-
readonly durationInSeconds: number | undefined;
|
|
3930
|
-
/**
|
|
3931
|
-
Warnings for the call, e.g. unsupported settings.
|
|
3932
|
-
*/
|
|
3933
|
-
readonly warnings: Array<TranscriptionWarning>;
|
|
3934
|
-
/**
|
|
3935
|
-
Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
|
|
3936
|
-
*/
|
|
3937
|
-
readonly responses: Array<TranscriptionModelResponseMetadata>;
|
|
3938
|
-
/**
|
|
3939
|
-
Provider metadata from the provider.
|
|
3940
|
-
*/
|
|
3941
|
-
readonly providerMetadata: Record<string, Record<string, JSONValue$1>>;
|
|
3942
|
-
}
|
|
3943
|
-
|
|
3944
|
-
/**
|
|
3945
|
-
Generates transcripts using a transcription model.
|
|
3946
|
-
|
|
3947
|
-
@param model - The transcription model to use.
|
|
3948
|
-
@param audio - The audio data to transcribe as DataContent (string | Uint8Array | ArrayBuffer | Buffer) or a URL.
|
|
3949
|
-
@param providerOptions - Additional provider-specific options that are passed through to the provider
|
|
3950
|
-
as body parameters.
|
|
3951
|
-
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
|
|
3952
|
-
@param abortSignal - An optional abort signal that can be used to cancel the call.
|
|
3953
|
-
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
|
|
3954
|
-
|
|
3955
|
-
@returns A result object that contains the generated transcript.
|
|
3956
|
-
*/
|
|
3957
|
-
declare function transcribe({ model, audio, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
|
|
3958
|
-
/**
|
|
3959
|
-
The transcription model to use.
|
|
3960
|
-
*/
|
|
3961
|
-
model: TranscriptionModelV2;
|
|
3962
|
-
/**
|
|
3963
|
-
The audio data to transcribe.
|
|
3964
|
-
*/
|
|
3965
|
-
audio: DataContent | URL;
|
|
3966
|
-
/**
|
|
3967
|
-
Additional provider-specific options that are passed through to the provider
|
|
3968
|
-
as body parameters.
|
|
3969
|
-
|
|
3970
|
-
The outer record is keyed by the provider name, and the inner
|
|
3971
|
-
record is keyed by the provider-specific metadata key.
|
|
3972
|
-
```ts
|
|
3973
|
-
{
|
|
3974
|
-
"openai": {
|
|
3975
|
-
"temperature": 0
|
|
3976
|
-
}
|
|
4658
|
+
"openai": {}
|
|
3977
4659
|
}
|
|
3978
4660
|
```
|
|
3979
4661
|
*/
|
|
3980
4662
|
providerOptions?: ProviderOptions;
|
|
3981
4663
|
/**
|
|
3982
|
-
Maximum number of retries per
|
|
4664
|
+
Maximum number of retries per speech model call. Set to 0 to disable retries.
|
|
3983
4665
|
|
|
3984
4666
|
@default 2
|
|
3985
4667
|
*/
|
|
@@ -3993,613 +4675,439 @@ declare function transcribe({ model, audio, providerOptions, maxRetries: maxRetr
 Only applicable for HTTP-based providers.
 */
 headers?: Record<string, string>;
-}): Promise<
-
-declare const getOriginalFetch: () => typeof fetch;
-declare function callCompletionApi({ api, prompt, credentials, headers, body, streamProtocol, setCompletion, setLoading, setError, setAbortController, onFinish, onError, fetch, }: {
-api: string;
-prompt: string;
-credentials: RequestCredentials | undefined;
-headers: HeadersInit | undefined;
-body: Record<string, any>;
-streamProtocol: 'data' | 'text' | undefined;
-setCompletion: (completion: string) => void;
-setLoading: (loading: boolean) => void;
-setError: (error: Error | undefined) => void;
-setAbortController: (abortController: AbortController | null) => void;
-onFinish: ((prompt: string, completion: string) => void) | undefined;
-onError: ((error: Error) => void) | undefined;
-fetch: ReturnType<typeof getOriginalFetch> | undefined;
-}): Promise<string | null | undefined>;
+}): Promise<SpeechResult>;

-
-
-
-
-
-
-* Merges the contents of another stream to this stream.
-*/
-merge(stream: ReadableStream<InferUIMessageChunk<UI_MESSAGE>>): void;
-/**
-* Error handler that is used by the data stream writer.
-* This is intended for forwarding when merging streams
-* to prevent duplicated error masking.
-*/
-onError: ErrorHandler | undefined;
-}
+type Warning = LanguageModelV3CallWarning | ImageModelV3CallWarning | SpeechModelV3CallWarning | TranscriptionModelV3CallWarning | SharedV3Warning;
+type LogWarningsFunction = (options: {
+warnings: Warning[];
+provider: string;
+model: string;
+}) => void;

-
-
-
-
-
-
-
-
-
-originalMessages?: UI_MESSAGE[];
-onFinish?: UIMessageStreamOnFinishCallback<UI_MESSAGE>;
-generateId?: IdGenerator;
-}): ReadableStream<InferUIMessageChunk<UI_MESSAGE>>;
+/**
+* Applies default settings for a embedding model.
+*/
+declare function defaultEmbeddingSettingsMiddleware({ settings, }: {
+settings: Partial<{
+headers?: EmbeddingModelCallOptions<string>['headers'];
+providerOptions?: EmbeddingModelCallOptions<string>['providerOptions'];
+}>;
+}): EmbeddingModelMiddleware;

-
-
-
+/**
+* Applies default settings for a language model.
+*/
+declare function defaultSettingsMiddleware({ settings, }: {
+settings: Partial<{
+maxOutputTokens?: LanguageModelV3CallOptions['maxOutputTokens'];
+temperature?: LanguageModelV3CallOptions['temperature'];
+stopSequences?: LanguageModelV3CallOptions['stopSequences'];
+topP?: LanguageModelV3CallOptions['topP'];
+topK?: LanguageModelV3CallOptions['topK'];
+presencePenalty?: LanguageModelV3CallOptions['presencePenalty'];
+frequencyPenalty?: LanguageModelV3CallOptions['frequencyPenalty'];
+responseFormat?: LanguageModelV3CallOptions['responseFormat'];
+seed?: LanguageModelV3CallOptions['seed'];
+tools?: LanguageModelV3CallOptions['tools'];
+toolChoice?: LanguageModelV3CallOptions['toolChoice'];
+headers?: LanguageModelV3CallOptions['headers'];
+providerOptions?: LanguageModelV3CallOptions['providerOptions'];
+}>;
+}): LanguageModelMiddleware;

-
-
-
+/**
+* Extract an XML-tagged reasoning section from the generated text and exposes it
+* as a `reasoning` property on the result.
+*
+* @param tagName - The name of the XML tag to extract reasoning from.
+* @param separator - The separator to use between reasoning and text sections.
+* @param startWithReasoning - Whether to start with reasoning tokens.
+*/
+declare function extractReasoningMiddleware({ tagName, separator, startWithReasoning, }: {
+tagName: string;
+separator?: string;
+startWithReasoning?: boolean;
+}): LanguageModelMiddleware;

-
-
-
-
+/**
+* Simulates streaming chunks with the response from a generate call.
+*/
+declare function simulateStreamingMiddleware(): LanguageModelMiddleware;

 /**
-*
+* Wraps a LanguageModelV3 instance with middleware functionality.
+* This function allows you to apply middleware to transform parameters,
+* wrap generate operations, and wrap stream operations of a language model.
 *
-* @param options
-* @param options.
-* @param options.
-* @param options.
+* @param options - Configuration options for wrapping the language model.
+* @param options.model - The original LanguageModelV3 instance to be wrapped.
+* @param options.middleware - The middleware to be applied to the language model. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.
+* @param options.modelId - Optional custom model ID to override the original model's ID.
+* @param options.providerId - Optional custom provider ID to override the original model's provider ID.
+* @returns A new LanguageModelV3 instance with middleware applied.
+*/
+declare const wrapLanguageModel: ({ model, middleware: middlewareArg, modelId, providerId, }: {
+model: LanguageModelV3;
+middleware: LanguageModelMiddleware | LanguageModelMiddleware[];
+modelId?: string;
+providerId?: string;
+}) => LanguageModelV3;
+
+/**
+* Wraps a EmbeddingModelV3 instance with middleware functionality.
+* This function allows you to apply middleware to transform parameters,
+* wrap embed operations of a language model.
 *
-* @
-*
+* @param options - Configuration options for wrapping the embedding model.
+* @param options.model - The original EmbeddingModelV3 instance to be wrapped.
+* @param options.middleware - The middleware to be applied to the embedding model. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.
+* @param options.modelId - Optional custom model ID to override the original model's ID.
+* @param options.providerId - Optional custom provider ID to override the original model's provider ID.
+* @returns A new EmbeddingModelV3 instance with middleware applied.
 */
-declare
-
-
-
-
-})
+declare const wrapEmbeddingModel: ({ model, middleware: middlewareArg, modelId, providerId, }: {
+model: EmbeddingModelV3<string>;
+middleware: EmbeddingModelMiddleware | EmbeddingModelMiddleware[];
+modelId?: string;
+providerId?: string;
+}) => EmbeddingModelV3<string>;

-
-
-
-
-
-
-
+/**
+* Wraps a ProviderV3 instance with middleware functionality.
+* This function allows you to apply middleware to all language models
+* from the provider, enabling you to transform parameters, wrap generate
+* operations, and wrap stream operations for every language model.
+*
+* @param options - Configuration options for wrapping the provider.
+* @param options.provider - The original ProviderV3 instance to be wrapped.
+* @param options.languageModelMiddleware - The middleware to be applied to all language models from the provider. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.
+* @returns A new ProviderV3 instance with middleware applied to all language models.
+*/
+declare function wrapProvider({ provider, languageModelMiddleware, }: {
+provider: ProviderV3 | ProviderV2;
+languageModelMiddleware: LanguageModelMiddleware | LanguageModelMiddleware[];
+}): ProviderV3;

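The middleware helpers and wrappers declared above compose as a pipeline around a base model. A minimal sketch of how they could be combined (illustrative only, not part of the package diff; the `@ai-sdk/openai` import and the model id are assumptions):

```ts
import { openai } from '@ai-sdk/openai'; // assumed provider package
import {
  defaultSettingsMiddleware,
  extractReasoningMiddleware,
  generateText,
  wrapLanguageModel,
} from 'ai';

// The first middleware transforms the call parameters first; the last one
// is wrapped directly around the underlying model.
const model = wrapLanguageModel({
  model: openai('gpt-4o'), // example model id (assumption)
  middleware: [
    defaultSettingsMiddleware({ settings: { temperature: 0 } }),
    extractReasoningMiddleware({ tagName: 'think' }),
  ],
});

async function main() {
  const { text } = await generateText({
    model,
    prompt: 'Explain what a retry policy is in one sentence.',
  });
  console.log(text);
}

main();
```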
 /**
-*
+* Creates a custom provider with specified language models, text embedding models, image models, transcription models, speech models, and an optional fallback provider.
 *
-*
-*
-*
-*
+* @param {Object} options - The options for creating the custom provider.
+* @param {Record<string, LanguageModel>} [options.languageModels] - A record of language models, where keys are model IDs and values are LanguageModel instances.
+* @param {Record<string, EmbeddingModel<string>>} [options.textEmbeddingModels] - A record of text embedding models, where keys are model IDs and values are EmbeddingModel<string> instances.
+* @param {Record<string, ImageModel>} [options.imageModels] - A record of image models, where keys are model IDs and values are ImageModel instances.
+* @param {Record<string, TranscriptionModel>} [options.transcriptionModels] - A record of transcription models, where keys are model IDs and values are TranscriptionModel instances.
+* @param {Record<string, SpeechModel>} [options.speechModels] - A record of speech models, where keys are model IDs and values are SpeechModel instances.
+* @param {Record<string, RerankingModel<string>>} [options.rerankingModels] - A record of reranking models, where keys are model IDs and values are RerankingModel<string> instances.
+* @param {Provider} [options.fallbackProvider] - An optional fallback provider to use when a requested model is not found in the custom provider.
+* @returns {Provider} A Provider object with languageModel, textEmbeddingModel, imageModel, transcriptionModel, and speechModel methods.
 *
-* @
+* @throws {NoSuchModelError} Throws when a requested model is not found and no fallback provider is available.
 */
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-* @param options.headers - Additional HTTP headers to include in the request
-* @param options.body - Additional JSON properties to include in the request body
-* @param options.metadata - Custom metadata to attach to the request
-*
-* @returns Promise resolving to a ReadableStream of UIMessageChunk objects.
-* The stream emits various chunk types like:
-* - `text-start`, `text-delta`, `text-end`: For streaming text content
-* - `tool-input-start`, `tool-input-delta`, `tool-input-available`: For tool calls
-* - `data-part-start`, `data-part-delta`, `data-part-available`: For data parts
-* - `error`: For error handling
-*
-* @throws Error when the API request fails or response is invalid
-*/
-sendMessages: (options: {
-/** The type of message submission - either new message or regeneration */
-trigger: 'submit-message' | 'regenerate-message';
-/** Unique identifier for the chat session */
-chatId: string;
-/** ID of the message to regenerate, or undefined for new messages */
-messageId: string | undefined;
-/** Array of UI messages representing the conversation history */
-messages: UI_MESSAGE[];
-/** Signal to abort the request if needed */
-abortSignal: AbortSignal | undefined;
-} & ChatRequestOptions) => Promise<ReadableStream<UIMessageChunk>>;
-/**
-* Reconnects to an existing streaming response for the specified chat session.
-*
-* This method is used to resume streaming when a connection is interrupted
-* or when resuming a chat session. It's particularly useful for maintaining
-* continuity in long-running conversations or recovering from network issues.
-*
-* @param options - Configuration object containing:
-* @param options.chatId - Unique identifier for the chat session to reconnect to
-* @param options.headers - Additional HTTP headers to include in the reconnection request
-* @param options.body - Additional JSON properties to include in the request body
-* @param options.metadata - Custom metadata to attach to the request
-*
-* @returns Promise resolving to:
-* - `ReadableStream<UIMessageChunk>`: If an active stream is found and can be resumed
-* - `null`: If no active stream exists for the specified chat session (e.g., response already completed)
-*
-* @throws Error when the reconnection request fails or response is invalid
-*/
-reconnectToStream: (options: {
-/** Unique identifier for the chat session to reconnect to */
-chatId: string;
-} & ChatRequestOptions) => Promise<ReadableStream<UIMessageChunk> | null>;
-}
-
-type CreateUIMessage<UI_MESSAGE extends UIMessage> = Omit<UI_MESSAGE, 'id' | 'role'> & {
-id?: UI_MESSAGE['id'];
-role?: UI_MESSAGE['role'];
-};
-type UIDataPartSchemas = Record<string, Validator<any> | StandardSchemaV1<any>>;
-type UIDataTypesToSchemas<T extends UIDataTypes> = {
-[K in keyof T]: Validator<T[K]> | StandardSchemaV1<T[K]>;
-};
-type InferUIDataParts<T extends UIDataPartSchemas> = {
-[K in keyof T]: T[K] extends Validator<infer U> ? U : T[K] extends StandardSchemaV1<infer U> ? U : unknown;
-};
-type ChatRequestOptions = {
-/**
-Additional headers that should be to be passed to the API endpoint.
-*/
-headers?: Record<string, string> | Headers;
-/**
-Additional body JSON properties that should be sent to the API endpoint.
-*/
-body?: object;
-metadata?: unknown;
+declare function customProvider<LANGUAGE_MODELS extends Record<string, LanguageModelV3>, EMBEDDING_MODELS extends Record<string, EmbeddingModelV3<string>>, IMAGE_MODELS extends Record<string, ImageModelV3>, TRANSCRIPTION_MODELS extends Record<string, TranscriptionModelV3>, SPEECH_MODELS extends Record<string, SpeechModelV3>, RERANKING_MODELS extends Record<string, RerankingModelV3>>({ languageModels, textEmbeddingModels, imageModels, transcriptionModels, speechModels, rerankingModels, fallbackProvider: fallbackProviderArg, }: {
+languageModels?: LANGUAGE_MODELS;
+textEmbeddingModels?: EMBEDDING_MODELS;
+imageModels?: IMAGE_MODELS;
+transcriptionModels?: TRANSCRIPTION_MODELS;
+speechModels?: SPEECH_MODELS;
+rerankingModels?: RERANKING_MODELS;
+fallbackProvider?: ProviderV3 | ProviderV2;
+}): ProviderV3 & {
+languageModel(modelId: ExtractModelId<LANGUAGE_MODELS>): LanguageModelV3;
+textEmbeddingModel(modelId: ExtractModelId<EMBEDDING_MODELS>): EmbeddingModelV3<string>;
+imageModel(modelId: ExtractModelId<IMAGE_MODELS>): ImageModelV3;
+transcriptionModel(modelId: ExtractModelId<TRANSCRIPTION_MODELS>): TranscriptionModelV3;
+rerankingModel(modelId: ExtractModelId<RERANKING_MODELS>): RerankingModelV3;
+speechModel(modelId: ExtractModelId<SPEECH_MODELS>): SpeechModelV3;
 };
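For orientation, a sketch of how the `customProvider` factory declared above might be used to publish model aliases (illustrative, not part of the diff; the `@ai-sdk/openai` import and the alias names are assumptions):

```ts
import { openai } from '@ai-sdk/openai'; // assumed provider package
import { customProvider, generateText } from 'ai';

// The alias ids ('fast', 'smart') are arbitrary examples; ids that are not
// listed fall through to the fallback provider instead of throwing.
const myProvider = customProvider({
  languageModels: {
    fast: openai('gpt-4o-mini'),
    smart: openai('gpt-4o'),
  },
  fallbackProvider: openai,
});

async function main() {
  const { text } = await generateText({
    model: myProvider.languageModel('fast'),
    prompt: 'Say hello.',
  });
  console.log(text);
}

main();
```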
-
-
-
-
-
-
-
-
-
+/**
+* @deprecated Use `customProvider` instead.
+*/
+declare const experimental_customProvider: typeof customProvider;
+type ExtractModelId<MODELS extends Record<string, unknown>> = Extract<keyof MODELS, string>;
+
+declare const symbol: unique symbol;
+declare class NoSuchProviderError extends NoSuchModelError {
+private readonly [symbol];
+readonly providerId: string;
+readonly availableProviders: string[];
+constructor({ modelId, modelType, providerId, availableProviders, message, }: {
+modelId: string;
+modelType: 'languageModel' | 'textEmbeddingModel' | 'imageModel' | 'transcriptionModel' | 'speechModel' | 'rerankingModel';
+providerId: string;
+availableProviders: string[];
+message?: string;
+});
+static isInstance(error: unknown): error is NoSuchProviderError;
+}
+
+type ExtractLiteralUnion<T> = T extends string ? string extends T ? never : T : never;
+interface ProviderRegistryProvider<PROVIDERS extends Record<string, ProviderV3> = Record<string, ProviderV3>, SEPARATOR extends string = ':'> {
+languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['languageModel']>>[0]>}` : never): LanguageModelV3;
+languageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): LanguageModelV3;
+textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['textEmbeddingModel']>>[0]>}` : never): EmbeddingModelV3<string>;
+textEmbeddingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): EmbeddingModelV3<string>;
+imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['imageModel']>>[0]>}` : never): ImageModelV3;
+imageModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): ImageModelV3;
+transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['transcriptionModel']>>[0]>}` : never): TranscriptionModelV3;
+transcriptionModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): TranscriptionModelV3;
+speechModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['speechModel']>>[0]>}` : never): SpeechModelV3;
+speechModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): SpeechModelV3;
+rerankingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${ExtractLiteralUnion<Parameters<NonNullable<PROVIDERS[KEY]['rerankingModel']>>[0]>}` : never): RerankingModelV3;
+rerankingModel<KEY extends keyof PROVIDERS>(id: KEY extends string ? `${KEY & string}${SEPARATOR}${string}` : never): RerankingModelV3;
 }
-type ChatOnErrorCallback = (error: Error) => void;
-type ChatOnToolCallCallback<UI_MESSAGE extends UIMessage = UIMessage> = (options: {
-toolCall: InferUIMessageToolCall<UI_MESSAGE>;
-}) => void | PromiseLike<void>;
-type ChatOnDataCallback<UI_MESSAGE extends UIMessage> = (dataPart: DataUIPart<InferUIMessageData<UI_MESSAGE>>) => void;
 /**
-*
-*
-*
-*
+* Creates a registry for the given providers with optional middleware functionality.
+* This function allows you to register multiple providers and optionally apply middleware
+* to all language models from the registry, enabling you to transform parameters, wrap generate
+* operations, and wrap stream operations for every language model accessed through the registry.
 *
-* @param
-* @param
-* @param
+* @param providers - A record of provider instances to be registered in the registry.
+* @param options - Configuration options for the provider registry.
+* @param options.separator - The separator used between provider ID and model ID in the combined identifier. Defaults to ':'.
+* @param options.languageModelMiddleware - Optional middleware to be applied to all language models from the registry. When multiple middlewares are provided, the first middleware will transform the input first, and the last middleware will be wrapped directly around the model.
+* @returns A new ProviderRegistryProvider instance that provides access to all registered providers with optional middleware applied to language models.
 */
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-dataPartSchemas?: UIDataTypesToSchemas<InferUIMessageData<UI_MESSAGE>>;
-messages?: UI_MESSAGE[];
-/**
-* A way to provide a function that is going to be used for ids for messages and the chat.
-* If not provided the default AI SDK `generateId` is used.
-*/
-generateId?: IdGenerator;
-transport?: ChatTransport<UI_MESSAGE>;
-/**
-* Callback function to be called when an error is encountered.
-*/
-onError?: ChatOnErrorCallback;
-/**
-Optional callback function that is invoked when a tool call is received.
-Intended for automatic client-side tool execution.
-
-You can optionally return a result for the tool call,
-either synchronously or asynchronously.
-*/
-onToolCall?: ChatOnToolCallCallback<UI_MESSAGE>;
+declare function createProviderRegistry<PROVIDERS extends Record<string, ProviderV3>, SEPARATOR extends string = ':'>(providers: PROVIDERS, { separator, languageModelMiddleware, }?: {
+separator?: SEPARATOR;
+languageModelMiddleware?: LanguageModelMiddleware | LanguageModelMiddleware[];
+}): ProviderRegistryProvider<PROVIDERS, SEPARATOR>;
+/**
+* @deprecated Use `createProviderRegistry` instead.
+*/
+declare const experimental_createProviderRegistry: typeof createProviderRegistry;
+
+/**
+* The result of a `rerank` call.
+* It contains the original documents, the reranked documents, and additional information.
+*/
+interface RerankResult<VALUE> {
 /**
-*
+* The original documents that were reranked.
 */
-
+readonly originalDocuments: Array<VALUE>;
 /**
-*
+* Reranked documents.
 *
-*
-*/
-onData?: ChatOnDataCallback<UI_MESSAGE>;
-/**
-* When provided, this function will be called when the stream is finished or a tool call is added
-* to determine if the current messages should be resubmitted.
-*/
-sendAutomaticallyWhen?: (options: {
-messages: UI_MESSAGE[];
-}) => boolean | PromiseLike<boolean>;
-}
-declare abstract class AbstractChat<UI_MESSAGE extends UIMessage> {
-readonly id: string;
-readonly generateId: IdGenerator;
-protected state: ChatState<UI_MESSAGE>;
-private messageMetadataSchema;
-private dataPartSchemas;
-private readonly transport;
-private onError?;
-private onToolCall?;
-private onFinish?;
-private onData?;
-private sendAutomaticallyWhen?;
-private activeResponse;
-private jobExecutor;
-constructor({ generateId, id, transport, messageMetadataSchema, dataPartSchemas, state, onError, onToolCall, onFinish, onData, sendAutomaticallyWhen, }: Omit<ChatInit<UI_MESSAGE>, 'messages'> & {
-state: ChatState<UI_MESSAGE>;
-});
-/**
-* Hook status:
+* Sorted by relevance score in descending order.
 *
-*
-* - `streaming`: The response is actively streaming in from the API, receiving chunks of data.
-* - `ready`: The full response has been received and processed; a new user message can be submitted.
-* - `error`: An error occurred during the API request, preventing successful completion.
+* Can be less than the original documents if there was a topK limit.
 */
-
-protected setStatus({ status, error, }: {
-status: ChatStatus;
-error?: Error;
-}): void;
-get error(): Error | undefined;
-get messages(): UI_MESSAGE[];
-get lastMessage(): UI_MESSAGE | undefined;
-set messages(messages: UI_MESSAGE[]);
+readonly rerankedDocuments: Array<VALUE>;
 /**
-*
-* the
+* The ranking is a list of objects with the original index,
+* relevance score, and the reranked document.
 *
-*
-
-
-text?: never;
-files?: never;
-messageId?: string;
-}) | {
-text: string;
-files?: FileList | FileUIPart[];
-metadata?: InferUIMessageMetadata<UI_MESSAGE>;
-parts?: never;
-messageId?: string;
-} | {
-files: FileList | FileUIPart[];
-metadata?: InferUIMessageMetadata<UI_MESSAGE>;
-parts?: never;
-messageId?: string;
-}, options?: ChatRequestOptions) => Promise<void>;
-/**
-* Regenerate the assistant message with the provided message id.
-* If no message id is provided, the last assistant message will be regenerated.
-*/
-regenerate: ({ messageId, ...options }?: {
-messageId?: string;
-} & ChatRequestOptions) => Promise<void>;
-/**
-* Attempt to resume an ongoing streaming response.
+* Sorted by relevance score in descending order.
+*
+* Can be less than the original documents if there was a topK limit.
 */
-
+readonly ranking: Array<{
+originalIndex: number;
+score: number;
+document: VALUE;
+}>;
 /**
-*
+* Optional provider-specific metadata.
 */
-
-addToolResult: <TOOL extends keyof InferUIMessageTools<UI_MESSAGE>>({ tool, toolCallId, output, }: {
-tool: TOOL;
-toolCallId: string;
-output: InferUIMessageTools<UI_MESSAGE>[TOOL]["output"];
-}) => Promise<void>;
+readonly providerMetadata?: ProviderMetadata;
 /**
-*
+* Optional raw response data.
 */
-
-
+readonly response: {
+/**
+* ID for the generated response if the provider sends one.
+*/
+id?: string;
+/**
+* Timestamp of the generated response.
+*/
+timestamp: Date;
+/**
+* The ID of the model that was used to generate the response.
+*/
+modelId: string;
+/**
+* Response headers.
+*/
+headers?: Record<string, string>;
+/**
+* The response body.
+*/
+body?: unknown;
+};
 }

-declare function convertFileListToFileUIParts(files: FileList | undefined): Promise<Array<FileUIPart>>;
-
 /**
-
-with the AI core functions (e.g. `streamText`).
+Rerank documents using an reranking model. The type of the value is defined by the reranking model.

-@param
-@param
-@param
-
-declare function convertToModelMessages(messages: Array<Omit<UIMessage, 'id'>>, options?: {
-tools?: ToolSet;
-ignoreIncompleteToolCalls?: boolean;
-}): ModelMessage[];
-/**
-@deprecated Use `convertToModelMessages` instead.
-*/
-declare const convertToCoreMessages: typeof convertToModelMessages;
+@param model - The Reranking model to use.
+@param documents - The documents that should be reranking.
+@param query - The query is a string that represents the query to rerank the documents against.
+@param topN - Top n documents to rerank.

-
-
-
-
-
-credentials: RequestCredentials | undefined;
-headers: HeadersInit | undefined;
-api: string;
-} & {
-trigger: 'submit-message' | 'regenerate-message';
-messageId: string | undefined;
-}) => {
-body: object;
-headers?: HeadersInit;
-credentials?: RequestCredentials;
-api?: string;
-} | PromiseLike<{
-body: object;
-headers?: HeadersInit;
-credentials?: RequestCredentials;
-api?: string;
-}>;
-type PrepareReconnectToStreamRequest = (options: {
-id: string;
-requestMetadata: unknown;
-body: Record<string, any> | undefined;
-credentials: RequestCredentials | undefined;
-headers: HeadersInit | undefined;
-api: string;
-}) => {
-headers?: HeadersInit;
-credentials?: RequestCredentials;
-api?: string;
-} | PromiseLike<{
-headers?: HeadersInit;
-credentials?: RequestCredentials;
-api?: string;
-}>;
-/**
-* Options for the `HttpChatTransport` class.
-*
-* @param UI_MESSAGE - The type of message to be used in the chat.
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
+
+@returns A result object that contains the reranked documents, the reranked indices, and additional information.
 */
-
-/**
-* The API URL to be used for the chat transport.
-* Defaults to '/api/chat'.
-*/
-api?: string;
+declare function rerank<VALUE extends JSONObject | string>({ model, documents, query, topN, maxRetries: maxRetriesArg, abortSignal, headers, providerOptions, experimental_telemetry: telemetry, }: {
 /**
-
-
-
-*/
-credentials?: Resolvable<RequestCredentials>;
+The reranking model to use.
+*/
+model: RerankingModel;
 /**
-*
+* The documents that should be reranked.
 */
-
+documents: Array<VALUE>;
 /**
-
-* @example
-* Send a `sessionId` to the API along with the messages.
-* ```js
-* useChat({
-* body: {
-* sessionId: '123',
-* }
-* })
-* ```
+The query is a string that represents the query to rerank the documents against.
 */
-
+query: string;
 /**
-
-
-
-fetch?: FetchFunction;
+* Number of top documents to return.
+*/
+topN?: number;
 /**
-
-
-
-*
-* @param id The id of the chat.
-* @param messages The current messages in the chat.
-* @param requestBody The request body object passed in the chat request.
+Maximum number of retries per reranking model call. Set to 0 to disable retries.
+
+@default 2
 */
-
+maxRetries?: number;
 /**
-
-
-
-
-
-
-
+Abort signal.
+*/
+abortSignal?: AbortSignal;
+/**
+Additional headers to include in the request.
+Only applicable for HTTP-based providers.
+*/
+headers?: Record<string, string>;
+/**
+* Optional telemetry configuration (experimental).
 */
-
-
-
-
-
-
-
-
-protected prepareSendMessagesRequest?: PrepareSendMessagesRequest<UI_MESSAGE>;
-protected prepareReconnectToStreamRequest?: PrepareReconnectToStreamRequest;
-constructor({ api, credentials, headers, body, fetch, prepareSendMessagesRequest, prepareReconnectToStreamRequest, }: HttpChatTransportInitOptions<UI_MESSAGE>);
-sendMessages({ abortSignal, ...options }: Parameters<ChatTransport<UI_MESSAGE>['sendMessages']>[0]): Promise<ReadableStream<UIMessageChunk>>;
-reconnectToStream(options: Parameters<ChatTransport<UI_MESSAGE>['reconnectToStream']>[0]): Promise<ReadableStream<UIMessageChunk> | null>;
-protected abstract processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
-}
+experimental_telemetry?: TelemetrySettings;
+/**
+Additional provider-specific options. They are passed through
+to the provider from the AI SDK and enable provider-specific
+functionality that can be fully encapsulated in the provider.
+*/
+providerOptions?: ProviderOptions;
+}): Promise<RerankResult<VALUE>>;

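The `rerank` declaration above is new in this version. A minimal sketch of a call based on that signature, using a placeholder reranking model because no concrete provider is assumed here:

```ts
import { rerank, type RerankingModel } from 'ai';

// Placeholder: obtain a reranking model from a provider that supports reranking.
declare const rerankingModel: RerankingModel;

async function findMostRelevant(query: string, documents: string[]) {
  const { rerankedDocuments, ranking } = await rerank({
    model: rerankingModel,
    query,
    documents,
    topN: 3, // keep only the three highest-scoring documents
  });

  // `ranking` pairs each returned document with its original index and relevance score.
  for (const { originalIndex, score } of ranking) {
    console.log(`document #${originalIndex} scored ${score}`);
  }

  return rerankedDocuments;
}
```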
-declare
-
-
-
+declare function createTextStreamResponse({ status, statusText, headers, textStream, }: ResponseInit & {
+textStream: ReadableStream<string>;
+}): Response;
+
+declare function pipeTextStreamToResponse({ response, status, statusText, headers, textStream, }: {
+response: ServerResponse;
+textStream: ReadableStream<string>;
+} & ResponseInit): void;

 /**
-
-
-all tool invocations must have a result.
+The result of a `transcribe` call.
+It contains the transcript and additional information.
 */
-
-messages: UIMessage[];
-}): boolean;
-
-declare class TextStreamChatTransport<UI_MESSAGE extends UIMessage> extends HttpChatTransport<UI_MESSAGE> {
-constructor(options?: HttpChatTransportInitOptions<UI_MESSAGE>);
-protected processResponseStream(stream: ReadableStream<Uint8Array<ArrayBufferLike>>): ReadableStream<UIMessageChunk>;
-}
-
-type CompletionRequestOptions = {
+interface TranscriptionResult {
 /**
-
+* The complete transcribed text from the audio.
 */
-
-/**
-An optional object to be passed to the API endpoint.
-*/
-body?: object;
-};
-type UseCompletionOptions = {
+readonly text: string;
 /**
-*
-*
+* Array of transcript segments with timing information.
+* Each segment represents a portion of the transcribed text with start and end times.
 */
-
+readonly segments: Array<{
+/**
+* The text content of this segment.
+*/
+readonly text: string;
+/**
+* The start time of this segment in seconds.
+*/
+readonly startSecond: number;
+/**
+* The end time of this segment in seconds.
+*/
+readonly endSecond: number;
+}>;
 /**
-*
-*
-* have shared states across components.
+* The detected language of the audio content, as an ISO-639-1 code (e.g., 'en' for English).
+* May be undefined if the language couldn't be detected.
 */
-
+readonly language: string | undefined;
 /**
-*
+* The total duration of the audio file in seconds.
+* May be undefined if the duration couldn't be determined.
 */
-
+readonly durationInSeconds: number | undefined;
 /**
-
-
-
+Warnings for the call, e.g. unsupported settings.
+*/
+readonly warnings: Array<TranscriptionWarning>;
 /**
-
+Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
 */
-
+readonly responses: Array<TranscriptionModelResponseMetadata>;
 /**
-
+Provider metadata from the provider.
 */
-
+readonly providerMetadata: Record<string, JSONObject>;
+}
+
+/**
+Generates transcripts using a transcription model.
+
+@param model - The transcription model to use.
+@param audio - The audio data to transcribe as DataContent (string | Uint8Array | ArrayBuffer | Buffer) or a URL.
+@param providerOptions - Additional provider-specific options that are passed through to the provider
+as body parameters.
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
+
+@returns A result object that contains the generated transcript.
+*/
+declare function transcribe({ model, audio, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
 /**
-
-
-
-*/
-credentials?: RequestCredentials;
+The transcription model to use.
+*/
+model: TranscriptionModel;
 /**
-
+The audio data to transcribe.
 */
-
+audio: DataContent | URL;
 /**
-
-
-
-
-
-
-
-
-
-
+Additional provider-specific options that are passed through to the provider
+as body parameters.
+
+The outer record is keyed by the provider name, and the inner
+record is keyed by the provider-specific metadata key.
+```ts
+{
+"openai": {
+"temperature": 0
+}
+}
+```
+*/
+providerOptions?: ProviderOptions;
+/**
+Maximum number of retries per transcript model call. Set to 0 to disable retries.
+
+@default 2
 */
-
+maxRetries?: number;
 /**
-
-
-
+Abort signal.
+*/
+abortSignal?: AbortSignal;
 /**
-
-
-
-
-}
-
-type SafeValidateUIMessagesResult<UI_MESSAGE extends UIMessage> = {
-success: true;
-data: Array<UI_MESSAGE>;
-} | {
-success: false;
-error: Error;
-};
-/**
-* Validates a list of UI messages like `validateUIMessages`,
-* but instead of throwing it returns `{ success: true, data }`
-* or `{ success: false, error }`.
-*/
-declare function safeValidateUIMessages<UI_MESSAGE extends UIMessage>({ messages, metadataSchema, dataSchemas, tools, }: {
-messages: unknown;
-metadataSchema?: Validator<UIMessage['metadata']> | StandardSchemaV1<unknown, UI_MESSAGE['metadata']>;
-dataSchemas?: {
-[NAME in keyof InferUIMessageData<UI_MESSAGE> & string]?: Validator<InferUIMessageData<UI_MESSAGE>[NAME]> | StandardSchemaV1<unknown, InferUIMessageData<UI_MESSAGE>[NAME]>;
-};
-tools?: {
-[NAME in keyof InferUIMessageTools<UI_MESSAGE> & string]?: Tool<InferUIMessageTools<UI_MESSAGE>[NAME]['input'], InferUIMessageTools<UI_MESSAGE>[NAME]['output']>;
-};
-}): Promise<SafeValidateUIMessagesResult<UI_MESSAGE>>;
-/**
-* Validates a list of UI messages.
-*
-* Metadata, data parts, and generic tool call structures are only validated if
-* the corresponding schemas are provided. Otherwise, they are assumed to be
-* valid.
-*/
-declare function validateUIMessages<UI_MESSAGE extends UIMessage>({ messages, metadataSchema, dataSchemas, tools, }: {
-messages: unknown;
-metadataSchema?: Validator<UIMessage['metadata']> | StandardSchemaV1<unknown, UI_MESSAGE['metadata']>;
-dataSchemas?: {
-[NAME in keyof InferUIMessageData<UI_MESSAGE> & string]?: Validator<InferUIMessageData<UI_MESSAGE>[NAME]> | StandardSchemaV1<unknown, InferUIMessageData<UI_MESSAGE>[NAME]>;
-};
-tools?: {
-[NAME in keyof InferUIMessageTools<UI_MESSAGE> & string]?: Tool<InferUIMessageTools<UI_MESSAGE>[NAME]['input'], InferUIMessageTools<UI_MESSAGE>[NAME]['output']>;
-};
-}): Promise<Array<UI_MESSAGE>>;
+Additional headers to include in the request.
+Only applicable for HTTP-based providers.
+*/
+headers?: Record<string, string>;
+}): Promise<TranscriptionResult>;

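The `transcribe` function re-declared above is exported as `experimental_transcribe` (see the export list at the end of this file). A sketch of a call against an audio file (illustrative only; the `@ai-sdk/openai` transcription model is an assumption):

```ts
import { readFile } from 'node:fs/promises';
import { openai } from '@ai-sdk/openai'; // assumed provider package
import { experimental_transcribe as transcribe } from 'ai';

async function transcribeFile(path: string) {
  const { text, segments, language, durationInSeconds } = await transcribe({
    model: openai.transcription('whisper-1'), // example model id (assumption)
    audio: await readFile(path), // DataContent (Uint8Array/Buffer/base64 string) or a URL
  });

  console.log(`language: ${language ?? 'unknown'}, duration: ${durationInSeconds ?? '?'}s`);
  for (const segment of segments) {
    console.log(`[${segment.startSecond}s-${segment.endSecond}s] ${segment.text}`);
  }

  return text;
}
```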
 declare global {
 /**
@@ -4610,7 +5118,7 @@ declare global {
 *
 * @see https://ai-sdk.dev/docs/ai-sdk-core/provider-management#global-provider-configuration
 */
-var AI_SDK_DEFAULT_PROVIDER:
+var AI_SDK_DEFAULT_PROVIDER: ProviderV3 | undefined;
 /**
 * The warning logger to use for the AI SDK.
 *
@@ -4621,4 +5129,4 @@ declare global {
 var AI_SDK_LOG_WARNINGS: LogWarningsFunction | undefined | false;
 }

-export { AbstractChat, AsyncIterableStream, CallSettings, CallWarning, ChatInit, ChatOnDataCallback, ChatOnErrorCallback, ChatOnFinishCallback, ChatOnToolCallCallback, ChatRequestOptions, ChatState, ChatStatus, ChatTransport, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataUIPart, DeepPartial, DefaultChatTransport, DownloadError, DynamicToolCall, DynamicToolError, DynamicToolResult, DynamicToolUIPart, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, ErrorHandler,
+export { AbstractChat, Agent, AsyncIterableStream, CallSettings, CallWarning, ChatAddToolApproveResponseFunction, ChatInit, ChatOnDataCallback, ChatOnErrorCallback, ChatOnFinishCallback, ChatOnToolCallCallback, ChatRequestOptions, ChatState, ChatStatus, ChatTransport, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataUIPart, DeepPartial, DefaultChatTransport, DownloadError, DynamicToolCall, DynamicToolError, DynamicToolResult, DynamicToolUIPart, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelMiddleware, EmbeddingModelUsage, ErrorHandler, ToolLoopAgent as Experimental_Agent, ToolLoopAgentSettings as Experimental_AgentSettings, DownloadFunction as Experimental_DownloadFunction, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, InferAgentUIMessage as Experimental_InferAgentUIMessage, LogWarningsFunction as Experimental_LogWarningsFunction, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, Warning as Experimental_Warning, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnFinishCallback, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, HttpChatTransport, HttpChatTransportInitOptions, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, ImageModelUsage, InferAgentUIMessage, InferCompleteOutput as InferGenerateOutput, InferPartialOutput as InferStreamOutput, InferUIDataParts, InferUIMessageChunk, InferUITool, InferUITools, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolInputError, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelMiddleware, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputGeneratedError, NoSpeechGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, PrepareReconnectToStreamRequest, PrepareSendMessagesRequest, PrepareStepFunction, PrepareStepResult, Prompt, Provider, ProviderMetadata, ProviderRegistryProvider, ReasoningOutput, ReasoningUIPart, RepairTextFunction, RerankResult, RerankingModel, RetryError, SafeValidateUIMessagesResult, SerialJobExecutor, SourceDocumentUIPart, SourceUrlUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StaticToolCall, StaticToolError, StaticToolOutputDenied, StaticToolResult, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextStreamChatTransport, TextStreamPart, TextUIPart, ToolApprovalRequestOutput, ToolCallRepairError, ToolCallRepairFunction, ToolChoice, ToolLoopAgent, ToolLoopAgentOnFinishCallback, ToolLoopAgentOnStepFinishCallback, ToolLoopAgentSettings, ToolSet, ToolUIPart, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, TypedToolCall, TypedToolError, TypedToolOutputDenied, TypedToolResult, UIDataPartSchemas, UIDataTypes, UIMessage, UIMessageChunk, UIMessagePart, UIMessageStreamOnFinishCallback, UIMessageStreamOptions, UIMessageStreamWriter, UITool, UIToolInvocation, UITools, UI_MESSAGE_STREAM_HEADERS, UnsupportedModelVersionError, UseCompletionOptions, assistantModelMessageSchema, callCompletionApi, consumeStream, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createAgentUIStream, createAgentUIStreamResponse, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultEmbeddingSettingsMiddleware, defaultSettingsMiddleware, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractReasoningMiddleware, generateObject, generateText, getTextFromDataUrl, getToolName, getToolOrDynamicToolName, hasToolCall, isDataUIPart, isDeepEqualData, isFileUIPart, isReasoningUIPart, isTextUIPart, isToolOrDynamicToolUIPart, isToolUIPart, lastAssistantMessageIsCompleteWithApprovalResponses, lastAssistantMessageIsCompleteWithToolCalls, modelMessageSchema, parsePartialJson, pipeAgentUIStreamToResponse, pipeTextStreamToResponse, pipeUIMessageStreamToResponse, pruneMessages, readUIMessageStream, rerank, safeValidateUIMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, stepCountIs, streamObject, streamText, systemModelMessageSchema, toolModelMessageSchema, uiMessageChunkSchema, userModelMessageSchema, validateUIMessages, wrapEmbeddingModel, wrapLanguageModel, wrapProvider };