ai 0.0.0-156c9f7b-20250115085202
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +2863 -0
- package/LICENSE +13 -0
- package/README.md +114 -0
- package/dist/index.d.mts +2620 -0
- package/dist/index.d.ts +2620 -0
- package/dist/index.js +6083 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +6048 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +117 -0
- package/react/dist/index.d.mts +10 -0
- package/react/dist/index.d.ts +10 -0
- package/react/dist/index.js +42 -0
- package/react/dist/index.js.map +1 -0
- package/react/dist/index.mjs +20 -0
- package/react/dist/index.mjs.map +1 -0
- package/rsc/dist/index.d.ts +723 -0
- package/rsc/dist/index.mjs +18 -0
- package/rsc/dist/rsc-client.d.mts +1 -0
- package/rsc/dist/rsc-client.mjs +18 -0
- package/rsc/dist/rsc-client.mjs.map +1 -0
- package/rsc/dist/rsc-server.d.mts +658 -0
- package/rsc/dist/rsc-server.mjs +1848 -0
- package/rsc/dist/rsc-server.mjs.map +1 -0
- package/rsc/dist/rsc-shared.d.mts +101 -0
- package/rsc/dist/rsc-shared.mjs +310 -0
- package/rsc/dist/rsc-shared.mjs.map +1 -0
- package/test/dist/index.d.mts +67 -0
- package/test/dist/index.d.ts +67 -0
- package/test/dist/index.js +131 -0
- package/test/dist/index.js.map +1 -0
- package/test/dist/index.mjs +101 -0
- package/test/dist/index.mjs.map +1 -0
package/dist/index.d.mts
ADDED
@@ -0,0 +1,2620 @@
import { DataStreamString, ToolInvocation, Attachment, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream } from '@ai-sdk/ui-utils';
export { ToolCall as CoreToolCall, ToolResult as CoreToolResult, generateId } from '@ai-sdk/provider-utils';
import { JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, NoSuchModelError } from '@ai-sdk/provider';
export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt, LanguageModelV1StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
import { ServerResponse } from 'node:http';
import { AttributeValue, Tracer } from '@opentelemetry/api';
import { z } from 'zod';
import { ServerResponse as ServerResponse$1 } from 'http';

interface DataStreamWriter {
    /**
     * Appends a data part to the stream.
     */
    writeData(value: JSONValue): void;
    /**
     * Appends a message annotation to the stream.
     */
    writeMessageAnnotation(value: JSONValue): void;
    /**
     * Merges the contents of another stream to this stream.
     */
    merge(stream: ReadableStream<DataStreamString>): void;
    /**
     * Error handler that is used by the data stream writer.
     * This is intended for forwarding when merging streams
     * to prevent duplicated error masking.
     */
    onError: ((error: unknown) => string) | undefined;
}

declare function createDataStream({ execute, onError, }: {
    execute: (dataStream: DataStreamWriter) => Promise<void> | void;
    onError?: (error: unknown) => string;
}): ReadableStream<DataStreamString>;

declare function createDataStreamResponse({ status, statusText, headers, execute, onError, }: ResponseInit & {
    execute: (dataStream: DataStreamWriter) => Promise<void> | void;
    onError?: (error: unknown) => string;
}): Response;

declare function pipeDataStreamToResponse(response: ServerResponse, { status, statusText, headers, execute, onError, }: ResponseInit & {
    execute: (writer: DataStreamWriter) => Promise<void> | void;
    onError?: (error: unknown) => string;
}): void;
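Usage sketch for the data stream helpers above, assuming a Next.js-style route handler; the route shape and data values are illustrative:

```ts
import { createDataStreamResponse } from 'ai';

// Hypothetical route handler that streams custom data parts to a client.
export async function POST(_req: Request): Promise<Response> {
  return createDataStreamResponse({
    status: 200,
    execute: async (dataStream) => {
      // Append arbitrary JSON data parts as work progresses.
      dataStream.writeData({ status: 'processing' });
      // ... do work ...
      dataStream.writeData({ status: 'done' });
    },
    // Mask internal error details before they reach the client.
    onError: () => 'An error occurred.',
  });
}
```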
/**
 * Telemetry configuration.
 */
type TelemetrySettings = {
    /**
     * Enable or disable telemetry. Disabled by default while experimental.
     */
    isEnabled?: boolean;
    /**
     * Enable or disable input recording. Enabled by default.
     *
     * You might want to disable input recording to avoid recording sensitive
     * information, to reduce data transfers, or to increase performance.
     */
    recordInputs?: boolean;
    /**
     * Enable or disable output recording. Enabled by default.
     *
     * You might want to disable output recording to avoid recording sensitive
     * information, to reduce data transfers, or to increase performance.
     */
    recordOutputs?: boolean;
    /**
     * Identifier for this function. Used to group telemetry data by function.
     */
    functionId?: string;
    /**
     * Additional information to include in the telemetry data.
     */
    metadata?: Record<string, AttributeValue>;
    /**
     * A custom tracer to use for the telemetry data.
     */
    tracer?: Tracer;
};

/**
Embedding model that is used by the AI SDK Core functions.
 */
type EmbeddingModel<VALUE> = EmbeddingModelV1<VALUE>;
/**
Embedding.
 */
type Embedding = EmbeddingModelV1Embedding;

/**
Image model that is used by the AI SDK Core functions.
 */
type ImageModel = ImageModelV1;
/**
Warning from the model provider for this call. The call will proceed, but e.g.
some settings might not be supported, which can lead to suboptimal results.
 */
type ImageGenerationWarning = ImageModelV1CallWarning;

/**
Language model that is used by the AI SDK Core functions.
 */
type LanguageModel = LanguageModelV1;
/**
Reason why a language model finished generating a response.

Can be one of the following:
- `stop`: model generated stop sequence
- `length`: model generated maximum number of tokens
- `content-filter`: content filter violation stopped the model
- `tool-calls`: model triggered tool calls
- `error`: model stopped because of an error
- `other`: model stopped for other reasons
 */
type FinishReason = LanguageModelV1FinishReason;
/**
Log probabilities for each token and its top log probabilities.

@deprecated Will become a provider extension in the future.
 */
type LogProbs = LanguageModelV1LogProbs;
/**
Warning from the model provider for this call. The call will proceed, but e.g.
some settings might not be supported, which can lead to suboptimal results.
 */
type CallWarning = LanguageModelV1CallWarning;
/**
Tool choice for the generation. It supports the following settings:

- `auto` (default): the model can choose whether and which tools to call.
- `required`: the model must call a tool. It can choose which tool to call.
- `none`: the model must not call tools
- `{ type: 'tool', toolName: string (typed) }`: the model must call the specified tool
 */
type CoreToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'required' | {
    type: 'tool';
    toolName: keyof TOOLS;
};

type LanguageModelRequestMetadata = {
    /**
    Raw request HTTP body that was sent to the provider API as a string (JSON should be stringified).
     */
    body?: string;
};

type LanguageModelResponseMetadata = {
    /**
    ID for the generated response.
     */
    id: string;
    /**
    Timestamp for the start of the generated response.
     */
    timestamp: Date;
    /**
    The ID of the response model that was used to generate the response.
     */
    modelId: string;
    /**
    Response headers.
     */
    headers?: Record<string, string>;
};

/**
 * Provider for language and text embedding models.
 */
type Provider = {
    /**
    Returns the language model with the given id.
    The model id is then passed to the provider function to get the model.

    @param {string} id - The id of the model to return.

    @returns {LanguageModel} The language model associated with the id

    @throws {NoSuchModelError} If no such model exists.
     */
    languageModel(modelId: string): LanguageModel;
    /**
    Returns the text embedding model with the given id.
    The model id is then passed to the provider function to get the model.

    @param {string} id - The id of the model to return.

    @returns {EmbeddingModel} The text embedding model associated with the id

    @throws {NoSuchModelError} If no such model exists.
     */
    textEmbeddingModel(modelId: string): EmbeddingModel<string>;
};
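A minimal sketch of implementing the `Provider` shape above; the model constants are placeholders (in practice they come from a provider package), and `NoSuchModelError` is the error class imported from `@ai-sdk/provider` at the top of this file:

```ts
import { NoSuchModelError } from '@ai-sdk/provider';

// Hypothetical concrete models standing in for real provider models.
declare const chatModel: LanguageModel;
declare const embeddingModel: EmbeddingModel<string>;

const myProvider: Provider = {
  languageModel(modelId) {
    if (modelId === 'chat') return chatModel;
    throw new NoSuchModelError({ modelId, modelType: 'languageModel' });
  },
  textEmbeddingModel(modelId) {
    if (modelId === 'embedding') return embeddingModel;
    throw new NoSuchModelError({ modelId, modelType: 'textEmbeddingModel' });
  },
};
```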
/**
Additional provider-specific metadata. They are passed through
to the provider from the AI SDK and enable provider-specific
functionality that can be fully encapsulated in the provider.
 */
type ProviderMetadata = LanguageModelV1ProviderMetadata;

/**
Represents the number of tokens used in a prompt and completion.
 */
type LanguageModelUsage = {
    /**
    The number of tokens used in the prompt.
     */
    promptTokens: number;
    /**
    The number of tokens used in the completion.
     */
    completionTokens: number;
    /**
    The total number of tokens used (promptTokens + completionTokens).
     */
    totalTokens: number;
};
/**
Represents the number of tokens used in an embedding.
 */
type EmbeddingModelUsage = {
    /**
    The number of tokens used in the embedding.
     */
    tokens: number;
};

/**
The result of an `embed` call.
It contains the embedding, the value, and additional information.
 */
interface EmbedResult<VALUE> {
    /**
    The value that was embedded.
     */
    readonly value: VALUE;
    /**
    The embedding of the value.
     */
    readonly embedding: Embedding;
    /**
    The embedding token usage.
     */
    readonly usage: EmbeddingModelUsage;
    /**
    Optional raw response data.
     */
    readonly rawResponse?: {
        /**
        Response headers.
         */
        headers?: Record<string, string>;
    };
}

/**
Embed a value using an embedding model. The type of the value is defined by the embedding model.

@param model - The embedding model to use.
@param value - The value that should be embedded.

@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
@param abortSignal - An optional abort signal that can be used to cancel the call.
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.

@returns A result object that contains the embedding, the value, and additional information.
 */
declare function embed<VALUE>({ model, value, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
    /**
    The embedding model to use.
     */
    model: EmbeddingModel<VALUE>;
    /**
    The value that should be embedded.
     */
    value: VALUE;
    /**
    Maximum number of retries per embedding model call. Set to 0 to disable retries.

    @default 2
     */
    maxRetries?: number;
    /**
    Abort signal.
     */
    abortSignal?: AbortSignal;
    /**
    Additional headers to include in the request.
    Only applicable for HTTP-based providers.
     */
    headers?: Record<string, string>;
    /**
     * Optional telemetry configuration (experimental).
     */
    experimental_telemetry?: TelemetrySettings;
}): Promise<EmbedResult<VALUE>>;
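Usage sketch for `embed`, assuming the `@ai-sdk/openai` provider package and its `text-embedding-3-small` model id:

```ts
import { embed } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

const { embedding, usage } = await embed({
  model: openai.embedding('text-embedding-3-small'),
  value: 'sunny day at the beach',
  maxRetries: 2, // the default, shown explicitly
});
// `embedding` is a number[]; `usage.tokens` reports embedding token usage.
console.log(embedding.length, usage.tokens);
```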
/**
The result of an `embedMany` call.
It contains the embeddings, the values, and additional information.
 */
interface EmbedManyResult<VALUE> {
    /**
    The values that were embedded.
     */
    readonly values: Array<VALUE>;
    /**
    The embeddings. They are in the same order as the values.
     */
    readonly embeddings: Array<Embedding>;
    /**
    The embedding token usage.
     */
    readonly usage: EmbeddingModelUsage;
}

/**
Embed several values using an embedding model. The type of the value is defined
by the embedding model.

`embedMany` automatically splits large requests into smaller chunks if the model
has a limit on how many embeddings can be generated in a single call.

@param model - The embedding model to use.
@param values - The values that should be embedded.

@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
@param abortSignal - An optional abort signal that can be used to cancel the call.
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.

@returns A result object that contains the embeddings, the values, and additional information.
 */
declare function embedMany<VALUE>({ model, values, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
    /**
    The embedding model to use.
     */
    model: EmbeddingModel<VALUE>;
    /**
    The values that should be embedded.
     */
    values: Array<VALUE>;
    /**
    Maximum number of retries per embedding model call. Set to 0 to disable retries.

    @default 2
     */
    maxRetries?: number;
    /**
    Abort signal.
     */
    abortSignal?: AbortSignal;
    /**
    Additional headers to include in the request.
    Only applicable for HTTP-based providers.
     */
    headers?: Record<string, string>;
    /**
     * Optional telemetry configuration (experimental).
     */
    experimental_telemetry?: TelemetrySettings;
}): Promise<EmbedManyResult<VALUE>>;
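Usage sketch for `embedMany`, under the same assumed `@ai-sdk/openai` provider; note that `embeddings[i]` corresponds to `values[i]`:

```ts
import { embedMany } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

const { embeddings, values, usage } = await embedMany({
  model: openai.embedding('text-embedding-3-small'),
  values: ['sunny day at the beach', 'rainy afternoon in the city'],
});
// embeddings[i] is the embedding of values[i].
console.log(embeddings.length === values.length, usage.tokens);
```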
/**
The result of a `generateImage` call.
It contains the images and additional information.
 */
interface GenerateImageResult {
    /**
    The first image that was generated.
     */
    readonly image: GeneratedImage;
    /**
    The images that were generated.
     */
    readonly images: Array<GeneratedImage>;
    /**
    Warnings for the call, e.g. unsupported settings.
     */
    readonly warnings: Array<ImageGenerationWarning>;
}
interface GeneratedImage {
    /**
    Image as a base64 encoded string.
     */
    readonly base64: string;
    /**
    Image as a Uint8Array.
     */
    readonly uint8Array: Uint8Array;
}

/**
Generates images using an image model.

@param model - The image model to use.
@param prompt - The prompt that should be used to generate the image.
@param n - Number of images to generate. Default: 1.
@param size - Size of the images to generate. Must have the format `{width}x{height}`.
@param aspectRatio - Aspect ratio of the images to generate. Must have the format `{width}:{height}`.
@param seed - Seed for the image generation.
@param providerOptions - Additional provider-specific options that are passed through to the provider
as body parameters.
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
@param abortSignal - An optional abort signal that can be used to cancel the call.
@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.

@returns A result object that contains the generated images.
 */
declare function generateImage({ model, prompt, n, size, aspectRatio, seed, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
    /**
    The image model to use.
     */
    model: ImageModelV1;
    /**
    The prompt that should be used to generate the image.
     */
    prompt: string;
    /**
    Number of images to generate.
     */
    n?: number;
    /**
    Size of the images to generate. Must have the format `{width}x{height}`. If not provided, the default size will be used.
     */
    size?: `${number}x${number}`;
    /**
    Aspect ratio of the images to generate. Must have the format `{width}:{height}`. If not provided, the default aspect ratio will be used.
     */
    aspectRatio?: `${number}:${number}`;
    /**
    Seed for the image generation. If not provided, the default seed will be used.
     */
    seed?: number;
    /**
    Additional provider-specific options that are passed through to the provider
    as body parameters.

    The outer record is keyed by the provider name, and the inner
    record is keyed by the provider-specific metadata key.
    ```ts
    {
      "openai": {
        "style": "vivid"
      }
    }
    ```
     */
    providerOptions?: Record<string, Record<string, JSONValue>>;
    /**
    Maximum number of retries per image model call. Set to 0 to disable retries.

    @default 2
     */
    maxRetries?: number;
    /**
    Abort signal.
     */
    abortSignal?: AbortSignal;
    /**
    Additional headers to include in the request.
    Only applicable for HTTP-based providers.
     */
    headers?: Record<string, string>;
}): Promise<GenerateImageResult>;
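Usage sketch for `generateImage`, assuming the `@ai-sdk/openai` provider package; this snapshot declares the function as `generateImage`, while released 4.x builds expose it under the `experimental_generateImage` name:

```ts
import { generateImage } from 'ai'; // `experimental_generateImage` in released builds
import { openai } from '@ai-sdk/openai'; // assumed provider package

const { image, warnings } = await generateImage({
  model: openai.image('dall-e-3'),
  prompt: 'A watercolor painting of a lighthouse at dusk',
  n: 1,
  size: '1024x1024',
  // Provider-specific options, keyed by provider name (see the TSDoc above).
  providerOptions: { openai: { style: 'vivid' } },
});
// image.base64 and image.uint8Array hold the first generated image.
```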
type CallSettings = {
    /**
    Maximum number of tokens to generate.
     */
    maxTokens?: number;
    /**
    Temperature setting. This is a number between 0 (almost no randomness) and
    1 (very random).

    It is recommended to set either `temperature` or `topP`, but not both.

    @default 0
     */
    temperature?: number;
    /**
    Nucleus sampling. This is a number between 0 and 1.

    E.g. 0.1 would mean that only tokens with the top 10% probability mass
    are considered.

    It is recommended to set either `temperature` or `topP`, but not both.
     */
    topP?: number;
    /**
    Only sample from the top K options for each subsequent token.

    Used to remove "long tail" low probability responses.
    Recommended for advanced use cases only. You usually only need to use temperature.
     */
    topK?: number;
    /**
    Presence penalty setting. It affects the likelihood of the model to
    repeat information that is already in the prompt.

    The presence penalty is a number between -1 (increase repetition)
    and 1 (maximum penalty, decrease repetition). 0 means no penalty.
     */
    presencePenalty?: number;
    /**
    Frequency penalty setting. It affects the likelihood of the model
    to repeatedly use the same words or phrases.

    The frequency penalty is a number between -1 (increase repetition)
    and 1 (maximum penalty, decrease repetition). 0 means no penalty.
     */
    frequencyPenalty?: number;
    /**
    Stop sequences.
    If set, the model will stop generating text when one of the stop sequences is generated.
    Providers may have limits on the number of stop sequences.
     */
    stopSequences?: string[];
    /**
    The seed (integer) to use for random sampling. If set and supported
    by the model, calls will generate deterministic results.
     */
    seed?: number;
    /**
    Maximum number of retries. Set to 0 to disable retries.

    @default 2
     */
    maxRetries?: number;
    /**
    Abort signal.
     */
    abortSignal?: AbortSignal;
    /**
    Additional HTTP headers to be sent with the request.
    Only applicable for HTTP-based providers.
     */
    headers?: Record<string, string | undefined>;
};

/**
Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
 */
type DataContent = string | Uint8Array | ArrayBuffer | Buffer;

type ToolResultContent = Array<{
    type: 'text';
    text: string;
} | {
    type: 'image';
    data: string;
    mimeType?: string;
}>;

/**
Text content part of a prompt. It contains a string of text.
 */
interface TextPart {
    type: 'text';
    /**
    The text content.
     */
    text: string;
    /**
    Additional provider-specific metadata. They are passed through
    to the provider from the AI SDK and enable provider-specific
    functionality that can be fully encapsulated in the provider.
     */
    experimental_providerMetadata?: ProviderMetadata;
}
/**
Image content part of a prompt. It contains an image.
 */
interface ImagePart {
    type: 'image';
    /**
    Image data. Can either be:

    - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
    - URL: a URL that points to the image
     */
    image: DataContent | URL;
    /**
    Optional mime type of the image.
     */
    mimeType?: string;
    /**
    Additional provider-specific metadata. They are passed through
    to the provider from the AI SDK and enable provider-specific
    functionality that can be fully encapsulated in the provider.
     */
    experimental_providerMetadata?: ProviderMetadata;
}
/**
File content part of a prompt. It contains a file.
 */
interface FilePart {
    type: 'file';
    /**
    File data. Can either be:

    - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
    - URL: a URL that points to the file
     */
    data: DataContent | URL;
    /**
    Mime type of the file.
     */
    mimeType: string;
    /**
    Additional provider-specific metadata. They are passed through
    to the provider from the AI SDK and enable provider-specific
    functionality that can be fully encapsulated in the provider.
     */
    experimental_providerMetadata?: ProviderMetadata;
}
/**
Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
 */
interface ToolCallPart {
    type: 'tool-call';
    /**
    ID of the tool call. This ID is used to match the tool call with the tool result.
     */
    toolCallId: string;
    /**
    Name of the tool that is being called.
     */
    toolName: string;
    /**
    Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
     */
    args: unknown;
    /**
    Additional provider-specific metadata. They are passed through
    to the provider from the AI SDK and enable provider-specific
    functionality that can be fully encapsulated in the provider.
     */
    experimental_providerMetadata?: ProviderMetadata;
}
/**
Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
 */
interface ToolResultPart {
    type: 'tool-result';
    /**
    ID of the tool call that this result is associated with.
     */
    toolCallId: string;
    /**
    Name of the tool that generated this result.
     */
    toolName: string;
    /**
    Result of the tool call. This is a JSON-serializable object.
     */
    result: unknown;
    /**
    Multi-part content of the tool result. Only for tools that support multipart results.
     */
    experimental_content?: ToolResultContent;
    /**
    Optional flag if the result is an error or an error message.
     */
    isError?: boolean;
    /**
    Additional provider-specific metadata. They are passed through
    to the provider from the AI SDK and enable provider-specific
    functionality that can be fully encapsulated in the provider.
     */
    experimental_providerMetadata?: ProviderMetadata;
}

/**
A system message. It can contain system information.

Note: using the "system" part of the prompt is strongly preferred
to increase the resilience against prompt injection attacks,
and because not all providers support several system messages.
 */
type CoreSystemMessage = {
    role: 'system';
    content: string;
    /**
    Additional provider-specific metadata. They are passed through
    to the provider from the AI SDK and enable provider-specific
    functionality that can be fully encapsulated in the provider.
     */
    experimental_providerMetadata?: ProviderMetadata;
};
/**
A user message. It can contain text or a combination of text and images.
 */
type CoreUserMessage = {
    role: 'user';
    content: UserContent;
    /**
    Additional provider-specific metadata. They are passed through
    to the provider from the AI SDK and enable provider-specific
    functionality that can be fully encapsulated in the provider.
     */
    experimental_providerMetadata?: ProviderMetadata;
};
/**
Content of a user message. It can be a string or an array of text and image parts.
 */
type UserContent = string | Array<TextPart | ImagePart | FilePart>;
/**
An assistant message. It can contain text, tool calls, or a combination of text and tool calls.
 */
type CoreAssistantMessage = {
    role: 'assistant';
    content: AssistantContent;
    /**
    Additional provider-specific metadata. They are passed through
    to the provider from the AI SDK and enable provider-specific
    functionality that can be fully encapsulated in the provider.
     */
    experimental_providerMetadata?: ProviderMetadata;
};
/**
Content of an assistant message. It can be a string or an array of text and tool call parts.
 */
type AssistantContent = string | Array<TextPart | ToolCallPart>;
/**
A tool message. It contains the result of one or more tool calls.
 */
type CoreToolMessage = {
    role: 'tool';
    content: ToolContent;
    /**
    Additional provider-specific metadata. They are passed through
    to the provider from the AI SDK and enable provider-specific
    functionality that can be fully encapsulated in the provider.
     */
    experimental_providerMetadata?: ProviderMetadata;
};
/**
Content of a tool message. It is an array of tool result parts.
 */
type ToolContent = Array<ToolResultPart>;
/**
A message that can be used in the `messages` field of a prompt.
It can be a user message, an assistant message, or a tool message.
 */
type CoreMessage = CoreSystemMessage | CoreUserMessage | CoreAssistantMessage | CoreToolMessage;
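A sketch of a `CoreMessage` array built from the message and part types above; the texts and image URL are placeholders:

```ts
const messages: CoreMessage[] = [
  { role: 'system', content: 'You are a concise assistant.' },
  {
    role: 'user',
    content: [
      { type: 'text', text: 'What is in this picture?' },
      // ImagePart accepts DataContent or a URL.
      { type: 'image', image: new URL('https://example.com/photo.jpg') },
    ],
  },
  { role: 'assistant', content: 'A lighthouse at dusk.' },
];
```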
type UIMessage = {
    role: 'system' | 'user' | 'assistant' | 'data';
    content: string;
    toolInvocations?: ToolInvocation[];
    experimental_attachments?: Attachment[];
};

/**
Prompt part of the AI function options.
It contains a system message, a simple text prompt, or a list of messages.
 */
type Prompt = {
    /**
    System message to include in the prompt. Can be used with `prompt` or `messages`.
     */
    system?: string;
    /**
    A simple text prompt. You can either use `prompt` or `messages` but not both.
     */
    prompt?: string;
    /**
    A list of messages. You can either use `prompt` or `messages` but not both.
     */
    messages?: Array<CoreMessage> | Array<UIMessage>;
};

/**
The result of a `generateObject` call.
 */
interface GenerateObjectResult<OBJECT> {
    /**
    The generated object (typed according to the schema).
     */
    readonly object: OBJECT;
    /**
    The reason why the generation finished.
     */
    readonly finishReason: FinishReason;
    /**
    The token usage of the generated text.
     */
    readonly usage: LanguageModelUsage;
    /**
    Warnings from the model provider (e.g. unsupported settings).
     */
    readonly warnings: CallWarning[] | undefined;
    /**
    Additional request information.
     */
    readonly request: LanguageModelRequestMetadata;
    /**
    Additional response information.
     */
    readonly response: LanguageModelResponseMetadata;
    /**
    Logprobs for the completion.
    `undefined` if the model does not support logprobs or if it was not enabled.

    @deprecated Will become a provider extension in the future.
     */
    readonly logprobs: LogProbs | undefined;
    /**
    Additional provider-specific metadata. They are passed through
    from the provider to the AI SDK and enable provider-specific
    results that can be fully encapsulated in the provider.
     */
    readonly experimental_providerMetadata: ProviderMetadata | undefined;
    /**
    Converts the object to a JSON response.
    The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
     */
    toJsonResponse(init?: ResponseInit): Response;
}

/**
Generate a structured, typed object for a given prompt and schema using a language model.

This function does not stream the output. If you want to stream the output, use `streamObject` instead.

@returns
A result object that contains the generated object, the finish reason, the token usage, and additional information.
 */
declare function generateObject<OBJECT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
    output?: 'object' | undefined;
    /**
    The language model to use.
     */
    model: LanguageModel;
    /**
    The schema of the object that the model should generate.
     */
    schema: z.Schema<OBJECT, z.ZodTypeDef, any> | Schema<OBJECT>;
    /**
    Optional name of the output that should be generated.
    Used by some providers for additional LLM guidance, e.g.
    via tool or schema name.
     */
    schemaName?: string;
    /**
    Optional description of the output that should be generated.
    Used by some providers for additional LLM guidance, e.g.
    via tool or schema description.
     */
    schemaDescription?: string;
    /**
    The mode to use for object generation.

    The schema is converted into a JSON schema and used in one of the following ways

    - 'auto': The provider will choose the best mode for the model.
    - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
    - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.

    Please note that most providers do not support all modes.

    Default and recommended: 'auto' (best mode for the model).
     */
    mode?: 'auto' | 'json' | 'tool';
    /**
    Optional telemetry configuration (experimental).
     */
    experimental_telemetry?: TelemetrySettings;
    /**
    Additional provider-specific metadata. They are passed through
    to the provider from the AI SDK and enable provider-specific
    functionality that can be fully encapsulated in the provider.
     */
    experimental_providerMetadata?: ProviderMetadata;
    /**
     * Internal. For test use only. May change without notice.
     */
    _internal?: {
        generateId?: () => string;
        currentDate?: () => Date;
    };
}): Promise<GenerateObjectResult<OBJECT>>;
/**
Generate an array with structured, typed elements for a given prompt and element schema using a language model.

This function does not stream the output. If you want to stream the output, use `streamObject` instead.

@return
A result object that contains the generated object, the finish reason, the token usage, and additional information.
 */
declare function generateObject<ELEMENT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
    output: 'array';
    /**
    The language model to use.
     */
    model: LanguageModel;
    /**
    The element schema of the array that the model should generate.
     */
    schema: z.Schema<ELEMENT, z.ZodTypeDef, any> | Schema<ELEMENT>;
    /**
    Optional name of the array that should be generated.
    Used by some providers for additional LLM guidance, e.g.
    via tool or schema name.
     */
    schemaName?: string;
    /**
    Optional description of the array that should be generated.
    Used by some providers for additional LLM guidance, e.g.
    via tool or schema description.
     */
    schemaDescription?: string;
    /**
    The mode to use for object generation.

    The schema is converted into a JSON schema and used in one of the following ways

    - 'auto': The provider will choose the best mode for the model.
    - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
    - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.

    Please note that most providers do not support all modes.

    Default and recommended: 'auto' (best mode for the model).
     */
    mode?: 'auto' | 'json' | 'tool';
    /**
    Optional telemetry configuration (experimental).
     */
    experimental_telemetry?: TelemetrySettings;
    /**
    Additional provider-specific metadata. They are passed through
    to the provider from the AI SDK and enable provider-specific
    functionality that can be fully encapsulated in the provider.
     */
    experimental_providerMetadata?: ProviderMetadata;
    /**
     * Internal. For test use only. May change without notice.
     */
    _internal?: {
        generateId?: () => string;
        currentDate?: () => Date;
    };
}): Promise<GenerateObjectResult<Array<ELEMENT>>>;
/**
Generate a value from an enum (limited list of string values) using a language model.

This function does not stream the output.

@return
A result object that contains the generated value, the finish reason, the token usage, and additional information.
 */
declare function generateObject<ENUM extends string>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
    output: 'enum';
    /**
    The language model to use.
     */
    model: LanguageModel;
    /**
    The enum values that the model should use.
     */
    enum: Array<ENUM>;
    /**
    The mode to use for object generation.

    The schema is converted into a JSON schema and used in one of the following ways

    - 'auto': The provider will choose the best mode for the model.
    - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
    - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.

    Please note that most providers do not support all modes.

    Default and recommended: 'auto' (best mode for the model).
     */
    mode?: 'auto' | 'json' | 'tool';
    /**
    Optional telemetry configuration (experimental).
     */
    experimental_telemetry?: TelemetrySettings;
    /**
    Additional provider-specific metadata. They are passed through
    to the provider from the AI SDK and enable provider-specific
    functionality that can be fully encapsulated in the provider.
     */
    experimental_providerMetadata?: ProviderMetadata;
    /**
     * Internal. For test use only. May change without notice.
     */
    _internal?: {
        generateId?: () => string;
        currentDate?: () => Date;
    };
}): Promise<GenerateObjectResult<ENUM>>;
/**
Generate JSON with any schema for a given prompt using a language model.

This function does not stream the output. If you want to stream the output, use `streamObject` instead.

@returns
A result object that contains the generated object, the finish reason, the token usage, and additional information.
 */
declare function generateObject(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
    output: 'no-schema';
    /**
    The language model to use.
     */
    model: LanguageModel;
    /**
    The mode to use for object generation. Must be "json" for no-schema output.
     */
    mode?: 'json';
    /**
    Optional telemetry configuration (experimental).
     */
    experimental_telemetry?: TelemetrySettings;
    /**
    Additional provider-specific metadata. They are passed through
    to the provider from the AI SDK and enable provider-specific
    functionality that can be fully encapsulated in the provider.
     */
    experimental_providerMetadata?: ProviderMetadata;
    /**
     * Internal. For test use only. May change without notice.
     */
    _internal?: {
        generateId?: () => string;
        currentDate?: () => Date;
    };
}): Promise<GenerateObjectResult<JSONValue>>;
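Usage sketch for the default `output: 'object'` overload of `generateObject`, assuming the `@ai-sdk/openai` provider package and a Zod schema; the model id and prompt are illustrative:

```ts
import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package
import { z } from 'zod';

const { object, finishReason, usage } = await generateObject({
  model: openai('gpt-4o'),
  schema: z.object({
    recipe: z.object({
      name: z.string(),
      ingredients: z.array(z.string()),
    }),
  }),
  prompt: 'Generate a lasagna recipe.',
});
// `object` is typed from the Zod schema and validated before the promise resolves.
console.log(object.recipe.name, finishReason, usage.totalTokens);
```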
type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;

/**
The result of a `streamObject` call that contains the partial object stream and additional information.
 */
interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
    /**
    Warnings from the model provider (e.g. unsupported settings)
     */
    readonly warnings: Promise<CallWarning[] | undefined>;
    /**
    The token usage of the generated response. Resolved when the response is finished.
     */
    readonly usage: Promise<LanguageModelUsage>;
    /**
    Additional provider-specific metadata. They are passed through
    from the provider to the AI SDK and enable provider-specific
    results that can be fully encapsulated in the provider.
     */
    readonly experimental_providerMetadata: Promise<ProviderMetadata | undefined>;
    /**
    Additional request information from the last step.
     */
    readonly request: Promise<LanguageModelRequestMetadata>;
    /**
    Additional response information.
     */
    readonly response: Promise<LanguageModelResponseMetadata>;
    /**
    The generated object (typed according to the schema). Resolved when the response is finished.
     */
    readonly object: Promise<RESULT>;
    /**
    Stream of partial objects. It gets more complete as the stream progresses.

    Note that the partial object is not validated.
    If you want to be certain that the actual content matches your schema, you need to implement your own validation for partial results.
     */
    readonly partialObjectStream: AsyncIterableStream<PARTIAL>;
    /**
     * Stream over complete array elements. Only available if the output strategy is set to `array`.
     */
    readonly elementStream: ELEMENT_STREAM;
    /**
    Text stream of the JSON representation of the generated object. It contains text chunks.
    When the stream is finished, the object is valid JSON that can be parsed.
     */
    readonly textStream: AsyncIterableStream<string>;
    /**
    Stream of different types of events, including partial objects, errors, and finish events.
    Only errors that stop the stream, such as network errors, are thrown.
     */
    readonly fullStream: AsyncIterableStream<ObjectStreamPart<PARTIAL>>;
    /**
    Writes text delta output to a Node.js response-like object.
    It sets a `Content-Type` header to `text/plain; charset=utf-8` and
    writes each text delta as a separate chunk.

    @param response A Node.js response-like object (ServerResponse).
    @param init Optional headers, status code, and status text.
     */
    pipeTextStreamToResponse(response: ServerResponse$1, init?: ResponseInit): void;
    /**
    Creates a simple text stream response.
    The response has a `Content-Type` header set to `text/plain; charset=utf-8`.
    Each text delta is encoded as UTF-8 and sent as a separate chunk.
    Non-text-delta events are ignored.

    @param init Optional headers, status code, and status text.
     */
    toTextStreamResponse(init?: ResponseInit): Response;
}
type ObjectStreamPart<PARTIAL> = {
    type: 'object';
    object: PARTIAL;
} | {
    type: 'text-delta';
    textDelta: string;
} | {
    type: 'error';
    error: unknown;
} | {
    type: 'finish';
    finishReason: FinishReason;
    logprobs?: LogProbs;
    usage: LanguageModelUsage;
    response: LanguageModelResponseMetadata;
    providerMetadata?: ProviderMetadata;
};

type OnFinishCallback<RESULT> = (event: {
    /**
    The token usage of the generated response.
     */
    usage: LanguageModelUsage;
    /**
    The generated object. Can be undefined if the final object does not match the schema.
     */
    object: RESULT | undefined;
    /**
    Optional error object. This is e.g. a TypeValidationError when the final object does not match the schema.
     */
    error: unknown | undefined;
    /**
    Response metadata.
     */
    response: LanguageModelResponseMetadata;
    /**
    Warnings from the model provider (e.g. unsupported settings).
     */
    warnings?: CallWarning[];
    /**
    Additional provider-specific metadata. They are passed through
    from the provider to the AI SDK and enable provider-specific
    results that can be fully encapsulated in the provider.
     */
    experimental_providerMetadata: ProviderMetadata | undefined;
}) => Promise<void> | void;
/**
Generate a structured, typed object for a given prompt and schema using a language model.

This function streams the output. If you do not want to stream the output, use `generateObject` instead.

@return
A result object for accessing the partial object stream and additional information.
 */
declare function streamObject<OBJECT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
    output?: 'object' | undefined;
    /**
    The language model to use.
     */
    model: LanguageModel;
    /**
    The schema of the object that the model should generate.
     */
    schema: z.Schema<OBJECT, z.ZodTypeDef, any> | Schema<OBJECT>;
    /**
    Optional name of the output that should be generated.
    Used by some providers for additional LLM guidance, e.g.
    via tool or schema name.
     */
    schemaName?: string;
    /**
    Optional description of the output that should be generated.
    Used by some providers for additional LLM guidance, e.g.
    via tool or schema description.
     */
    schemaDescription?: string;
    /**
    The mode to use for object generation.

    The schema is converted into a JSON schema and used in one of the following ways

    - 'auto': The provider will choose the best mode for the model.
    - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
    - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.

    Please note that most providers do not support all modes.

    Default and recommended: 'auto' (best mode for the model).
     */
    mode?: 'auto' | 'json' | 'tool';
    /**
    Optional telemetry configuration (experimental).
     */
    experimental_telemetry?: TelemetrySettings;
    /**
    Additional provider-specific metadata. They are passed through
    to the provider from the AI SDK and enable provider-specific
    functionality that can be fully encapsulated in the provider.
     */
    experimental_providerMetadata?: ProviderMetadata;
    /**
    Callback that is called when the LLM response and the final object validation are finished.
     */
    onFinish?: OnFinishCallback<OBJECT>;
    /**
     * Internal. For test use only. May change without notice.
     */
    _internal?: {
        generateId?: () => string;
        currentDate?: () => Date;
        now?: () => number;
    };
}): StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>;
/**
Generate an array with structured, typed elements for a given prompt and element schema using a language model.

This function streams the output. If you do not want to stream the output, use `generateObject` instead.

@return
A result object for accessing the partial object stream and additional information.
 */
declare function streamObject<ELEMENT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
    output: 'array';
    /**
    The language model to use.
     */
    model: LanguageModel;
    /**
    The element schema of the array that the model should generate.
     */
    schema: z.Schema<ELEMENT, z.ZodTypeDef, any> | Schema<ELEMENT>;
    /**
    Optional name of the array that should be generated.
    Used by some providers for additional LLM guidance, e.g.
    via tool or schema name.
     */
    schemaName?: string;
    /**
    Optional description of the array that should be generated.
    Used by some providers for additional LLM guidance, e.g.
    via tool or schema description.
     */
    schemaDescription?: string;
    /**
    The mode to use for object generation.

    The schema is converted into a JSON schema and used in one of the following ways

    - 'auto': The provider will choose the best mode for the model.
    - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
    - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.

    Please note that most providers do not support all modes.

    Default and recommended: 'auto' (best mode for the model).
     */
    mode?: 'auto' | 'json' | 'tool';
    /**
    Optional telemetry configuration (experimental).
     */
    experimental_telemetry?: TelemetrySettings;
    /**
    Additional provider-specific metadata. They are passed through
    to the provider from the AI SDK and enable provider-specific
    functionality that can be fully encapsulated in the provider.
     */
    experimental_providerMetadata?: ProviderMetadata;
    /**
    Callback that is called when the LLM response and the final object validation are finished.
     */
    onFinish?: OnFinishCallback<Array<ELEMENT>>;
    /**
     * Internal. For test use only. May change without notice.
     */
    _internal?: {
        generateId?: () => string;
        currentDate?: () => Date;
        now?: () => number;
    };
}): StreamObjectResult<Array<ELEMENT>, Array<ELEMENT>, AsyncIterableStream<ELEMENT>>;
/**
Generate JSON with any schema for a given prompt using a language model.

This function streams the output. If you do not want to stream the output, use `generateObject` instead.

@return
A result object for accessing the partial object stream and additional information.
 */
declare function streamObject(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
    output: 'no-schema';
    /**
    The language model to use.
     */
    model: LanguageModel;
    /**
    The mode to use for object generation. Must be "json" for no-schema output.
     */
    mode?: 'json';
    /**
    Optional telemetry configuration (experimental).
     */
    experimental_telemetry?: TelemetrySettings;
    /**
    Additional provider-specific metadata. They are passed through
    to the provider from the AI SDK and enable provider-specific
    functionality that can be fully encapsulated in the provider.
     */
    experimental_providerMetadata?: ProviderMetadata;
    /**
    Callback that is called when the LLM response and the final object validation are finished.
     */
    onFinish?: OnFinishCallback<JSONValue>;
    /**
     * Internal. For test use only. May change without notice.
     */
    _internal?: {
        generateId?: () => string;
        currentDate?: () => Date;
        now?: () => number;
    };
}): StreamObjectResult<JSONValue, JSONValue, never>;
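Usage sketch for `streamObject`, again assuming the `@ai-sdk/openai` provider package; note that `streamObject` returns its result synchronously, and partial objects are not validated:

```ts
import { streamObject } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package
import { z } from 'zod';

const { partialObjectStream, object } = streamObject({
  model: openai('gpt-4o'),
  schema: z.object({ headline: z.string(), summary: z.string() }),
  prompt: 'Summarize the following article: ...',
});

for await (const partial of partialObjectStream) {
  // Partial objects grow as the stream progresses; they are not validated.
  console.log(partial);
}
const finalObject = await object; // validated once the stream finishes
```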
type Parameters = z.ZodTypeAny | Schema<any>;
type inferParameters<PARAMETERS extends Parameters> = PARAMETERS extends Schema<any> ? PARAMETERS['_type'] : PARAMETERS extends z.ZodTypeAny ? z.infer<PARAMETERS> : never;
interface ToolExecutionOptions {
    /**
     * The ID of the tool call. You can use it e.g. when sending tool-call related information with stream data.
     */
    toolCallId: string;
    /**
     * Messages that were sent to the language model to initiate the response that contained the tool call.
     * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
     */
    messages: CoreMessage[];
    /**
     * An optional abort signal that indicates that the overall operation should be aborted.
     */
    abortSignal?: AbortSignal;
}
/**
A tool contains the description and the schema of the input that the tool expects.
This enables the language model to generate the input.

The tool can also contain an optional execute function that implements the actual execution of the tool.
|
1350
|
+
*/
|
1351
|
+
type CoreTool<PARAMETERS extends Parameters = any, RESULT = any> = {
|
1352
|
+
/**
|
1353
|
+
The schema of the input that the tool expects. The language model will use this to generate the input.
|
1354
|
+
It is also used to validate the output of the language model.
|
1355
|
+
Use descriptions to make the input understandable for the language model.
|
1356
|
+
*/
|
1357
|
+
parameters: PARAMETERS;
|
1358
|
+
/**
|
1359
|
+
Optional conversion function that maps the tool result to multi-part tool content for LLMs.
|
1360
|
+
*/
|
1361
|
+
experimental_toToolResultContent?: (result: RESULT) => ToolResultContent;
|
1362
|
+
/**
|
1363
|
+
An async function that is called with the arguments from the tool call and produces a result.
|
1364
|
+
If not provided, the tool will not be executed automatically.
|
1365
|
+
|
1366
|
+
@args is the input of the tool call.
|
1367
|
+
@options.abortSignal is a signal that can be used to abort the tool call.
|
1368
|
+
*/
|
1369
|
+
execute?: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
|
1370
|
+
} & ({
|
1371
|
+
/**
|
1372
|
+
Function tool.
|
1373
|
+
*/
|
1374
|
+
type?: undefined | 'function';
|
1375
|
+
/**
|
1376
|
+
An optional description of what the tool does. Will be used by the language model to decide whether to use the tool.
|
1377
|
+
*/
|
1378
|
+
description?: string;
|
1379
|
+
} | {
|
1380
|
+
/**
|
1381
|
+
Provider-defined tool.
|
1382
|
+
*/
|
1383
|
+
type: 'provider-defined';
|
1384
|
+
/**
|
1385
|
+
The ID of the tool. Should follow the format `<provider-name>.<tool-name>`.
|
1386
|
+
*/
|
1387
|
+
id: `${string}.${string}`;
|
1388
|
+
/**
|
1389
|
+
The arguments for configuring the tool. Must match the expected arguments defined by the provider for this tool.
|
1390
|
+
*/
|
1391
|
+
args: Record<string, unknown>;
|
1392
|
+
});
|
1393
|
+
/**
|
1394
|
+
Helper function for inferring the execute args of a tool.
|
1395
|
+
*/
|
1396
|
+
declare function tool<PARAMETERS extends Parameters, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
|
1397
|
+
execute: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
|
1398
|
+
}): CoreTool<PARAMETERS, RESULT> & {
|
1399
|
+
execute: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
|
1400
|
+
};
|
1401
|
+
declare function tool<PARAMETERS extends Parameters, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
|
1402
|
+
execute?: undefined;
|
1403
|
+
}): CoreTool<PARAMETERS, RESULT> & {
|
1404
|
+
execute: undefined;
|
1405
|
+
};
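
/*
 * Example (editor's illustration, not part of the published typings): defining
 * a typed tool with a zod schema; argument and result types are inferred.
 * The weather values are stand-ins for a real API call.
 *
 *   import { tool } from 'ai';
 *   import { z } from 'zod';
 *
 *   const weatherTool = tool({
 *     description: 'Get the current temperature for a city.',
 *     parameters: z.object({ city: z.string() }),
 *     execute: async ({ city }, { abortSignal }) => {
 *       // a real implementation would fetch from a weather API,
 *       // passing abortSignal so the request can be cancelled
 *       return { city, temperatureCelsius: 21 };
 *     },
 *   });
 */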

/**
 Converts an array of messages from useChat into an array of CoreMessages that can be used
 with the AI core functions (e.g. `streamText`).
 */
declare function convertToCoreMessages<TOOLS extends Record<string, CoreTool> = never>(messages: Array<UIMessage>, options?: {
    tools?: TOOLS;
}): CoreMessage[];
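
/*
 * Example (editor's illustration): converting useChat messages in a route
 * handler before calling a core function. `model` is a placeholder for any
 * LanguageModel instance.
 *
 *   import { convertToCoreMessages, streamText } from 'ai';
 *
 *   export async function POST(req: Request) {
 *     const { messages } = await req.json();
 *     const result = streamText({
 *       model,
 *       messages: convertToCoreMessages(messages),
 *     });
 *     return result.toDataStreamResponse();
 *   }
 */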

/**
 Create a union of the given object's values, and optionally specify which keys to get the values from.

 Please upvote [this issue](https://github.com/microsoft/TypeScript/issues/31438) if you want to have this type as a built-in in TypeScript.

 @example
 ```
 // data.json
 {
     'foo': 1,
     'bar': 2,
     'biz': 3
 }

 // main.ts
 import type {ValueOf} from 'type-fest';
 import data = require('./data.json');

 export function getData(name: string): ValueOf<typeof data> {
     return data[name];
 }

 export function onlyBar(name: string): ValueOf<typeof data, 'bar'> {
     return data[name];
 }

 // file.ts
 import {getData, onlyBar} from './main';

 getData('foo');
 //=> 1

 onlyBar('foo');
 //=> TypeError ...

 onlyBar('bar');
 //=> 2
 ```
 * @see https://github.com/sindresorhus/type-fest/blob/main/source/value-of.d.ts
 */
type ValueOf<ObjectType, ValueType extends keyof ObjectType = keyof ObjectType> = ObjectType[ValueType];

type ToolCallUnion<TOOLS extends Record<string, CoreTool>> = ValueOf<{
    [NAME in keyof TOOLS]: {
        type: 'tool-call';
        toolCallId: string;
        toolName: NAME & string;
        args: inferParameters<TOOLS[NAME]['parameters']>;
    };
}>;
type ToolCallArray<TOOLS extends Record<string, CoreTool>> = Array<ToolCallUnion<TOOLS>>;

type ToToolsWithExecute<TOOLS extends Record<string, CoreTool>> = {
    [K in keyof TOOLS as TOOLS[K] extends {
        execute: any;
    } ? K : never]: TOOLS[K];
};
type ToToolsWithDefinedExecute<TOOLS extends Record<string, CoreTool>> = {
    [K in keyof TOOLS as TOOLS[K]['execute'] extends undefined ? never : K]: TOOLS[K];
};
type ToToolResultObject<TOOLS extends Record<string, CoreTool>> = ValueOf<{
    [NAME in keyof TOOLS]: {
        type: 'tool-result';
        toolCallId: string;
        toolName: NAME & string;
        args: inferParameters<TOOLS[NAME]['parameters']>;
        result: Awaited<ReturnType<Exclude<TOOLS[NAME]['execute'], undefined>>>;
    };
}>;
type ToolResultUnion<TOOLS extends Record<string, CoreTool>> = ToToolResultObject<ToToolsWithDefinedExecute<ToToolsWithExecute<TOOLS>>>;
type ToolResultArray<TOOLS extends Record<string, CoreTool>> = Array<ToolResultUnion<TOOLS>>;

/**
 * The result of a single step in the generation process.
 */
type StepResult<TOOLS extends Record<string, CoreTool>> = {
    /**
     The generated text.
     */
    readonly text: string;
    /**
     The tool calls that were made during the generation.
     */
    readonly toolCalls: ToolCallArray<TOOLS>;
    /**
     The results of the tool calls.
     */
    readonly toolResults: ToolResultArray<TOOLS>;
    /**
     The reason why the generation finished.
     */
    readonly finishReason: FinishReason;
    /**
     The token usage of the generated text.
     */
    readonly usage: LanguageModelUsage;
    /**
     Warnings from the model provider (e.g. unsupported settings).
     */
    readonly warnings: CallWarning[] | undefined;
    /**
     Logprobs for the completion.
     `undefined` if the mode does not support logprobs or if it was not enabled.
     */
    readonly logprobs: LogProbs | undefined;
    /**
     Additional request information.
     */
    readonly request: LanguageModelRequestMetadata;
    /**
     Additional response information.
     */
    readonly response: LanguageModelResponseMetadata & {
        /**
         The response messages that were generated during the call. It consists of an assistant message,
         potentially containing tool calls.
         */
        readonly messages: Array<CoreAssistantMessage | CoreToolMessage>;
    };
    /**
     Additional provider-specific metadata. They are passed through
     from the provider to the AI SDK and enable provider-specific
     results that can be fully encapsulated in the provider.
     */
    readonly experimental_providerMetadata: ProviderMetadata | undefined;
    /**
     The type of step that this result is for. The first step is always
     an "initial" step, and subsequent steps are either "continue" steps
     or "tool-result" steps.
     */
    readonly stepType: 'initial' | 'continue' | 'tool-result';
    /**
     True when there will be a continuation step with a continuation text.
     */
    readonly isContinued: boolean;
};

/**
 The result of a `generateText` call.
 It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
 */
interface GenerateTextResult<TOOLS extends Record<string, CoreTool>, OUTPUT> {
    /**
     The generated text.
     */
    readonly text: string;
    /**
     The generated structured output. It uses the `experimental_output` specification.
     */
    readonly experimental_output: OUTPUT;
    /**
     The tool calls that were made during the generation.
     */
    readonly toolCalls: ToolCallArray<TOOLS>;
    /**
     The results of the tool calls.
     */
    readonly toolResults: ToolResultArray<TOOLS>;
    /**
     The reason why the generation finished.
     */
    readonly finishReason: FinishReason;
    /**
     The token usage of the generated text.
     */
    readonly usage: LanguageModelUsage;
    /**
     Warnings from the model provider (e.g. unsupported settings)
     */
    readonly warnings: CallWarning[] | undefined;
    /**
     Details for all steps.
     You can use this to get information about intermediate steps,
     such as the tool calls or the response headers.
     */
    readonly steps: Array<StepResult<TOOLS>>;
    /**
     Additional request information.
     */
    readonly request: LanguageModelRequestMetadata;
    /**
     Additional response information.
     */
    readonly response: LanguageModelResponseMetadata & {
        /**
         The response messages that were generated during the call. It consists of an assistant message,
         potentially containing tool calls.

         When there are tool results, there is an additional tool message with the tool results that are available.
         If there are tools that do not have execute functions, they are not included in the tool results and
         need to be added separately.
         */
        messages: Array<CoreAssistantMessage | CoreToolMessage>;
    };
    /**
     Logprobs for the completion.
     `undefined` if the mode does not support logprobs or if it was not enabled.

     @deprecated Will become a provider extension in the future.
     */
    readonly logprobs: LogProbs | undefined;
    /**
     Additional provider-specific metadata. They are passed through
     from the provider to the AI SDK and enable provider-specific
     results that can be fully encapsulated in the provider.
     */
    readonly experimental_providerMetadata: ProviderMetadata | undefined;
}

interface Output<OUTPUT, PARTIAL> {
    readonly type: 'object' | 'text';
    injectIntoSystemPrompt(options: {
        system: string | undefined;
        model: LanguageModel;
    }): string | undefined;
    responseFormat: (options: {
        model: LanguageModel;
    }) => LanguageModelV1CallOptions['responseFormat'];
    parsePartial(options: {
        text: string;
    }): {
        partial: PARTIAL;
    } | undefined;
    parseOutput(options: {
        text: string;
    }, context: {
        response: LanguageModelResponseMetadata;
        usage: LanguageModelUsage;
    }): OUTPUT;
}
declare const text: () => Output<string, string>;
declare const object: <OUTPUT>({ schema: inputSchema, }: {
    schema: z.Schema<OUTPUT, z.ZodTypeDef, any> | Schema<OUTPUT>;
}) => Output<OUTPUT, DeepPartial<OUTPUT>>;

type output_Output<OUTPUT, PARTIAL> = Output<OUTPUT, PARTIAL>;
declare const output_object: typeof object;
declare const output_text: typeof text;
declare namespace output {
  export {
    output_Output as Output,
    output_object as object,
    output_text as text,
  };
}

declare const symbol$c: unique symbol;
declare class InvalidToolArgumentsError extends AISDKError {
    private readonly [symbol$c];
    readonly toolName: string;
    readonly toolArgs: string;
    constructor({ toolArgs, toolName, cause, message, }: {
        message?: string;
        toolArgs: string;
        toolName: string;
        cause: unknown;
    });
    static isInstance(error: unknown): error is InvalidToolArgumentsError;
}

declare const symbol$b: unique symbol;
declare class NoSuchToolError extends AISDKError {
    private readonly [symbol$b];
    readonly toolName: string;
    readonly availableTools: string[] | undefined;
    constructor({ toolName, availableTools, message, }: {
        toolName: string;
        availableTools?: string[] | undefined;
        message?: string;
    });
    static isInstance(error: unknown): error is NoSuchToolError;
}

/**
 * A function that attempts to repair a tool call that failed to parse.
 *
 * It receives the error and the context as arguments and returns the repaired
 * tool call JSON as text.
 *
 * @param options.system - The system prompt.
 * @param options.messages - The messages in the current generation step.
 * @param options.toolCall - The tool call that failed to parse.
 * @param options.tools - The tools that are available.
 * @param options.parameterSchema - A function that returns the JSON Schema for a tool.
 * @param options.error - The error that occurred while parsing the tool call.
 */
type ToolCallRepairFunction<TOOLS extends Record<string, CoreTool>> = (options: {
    system: string | undefined;
    messages: CoreMessage[];
    toolCall: LanguageModelV1FunctionToolCall;
    tools: TOOLS;
    parameterSchema: (options: {
        toolName: string;
    }) => JSONSchema7;
    error: NoSuchToolError | InvalidToolArgumentsError;
}) => Promise<LanguageModelV1FunctionToolCall | null>;
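
/*
 * Example (editor's sketch; the repair strategy shown is an assumption, not
 * built-in SDK behavior): give up on unknown tool names, and otherwise return
 * the call so a real implementation could re-generate `args` so that it
 * validates against parameterSchema({ toolName: toolCall.toolName }).
 *
 *   import { NoSuchToolError } from 'ai';
 *
 *   const repairToolCall: ToolCallRepairFunction<typeof tools> = async ({
 *     toolCall,
 *     error,
 *   }) => {
 *     if (NoSuchToolError.isInstance(error)) {
 *       return null; // unknown tool names cannot be repaired
 *     }
 *     // placeholder: a real repair would rewrite toolCall.args (a JSON string)
 *     return toolCall;
 *   };
 */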

/**
 Generate a text and call tools for a given prompt using a language model.

 This function does not stream the output. If you want to stream the output, use `streamText` instead.

 @param model - The language model to use.

 @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
 @param toolChoice - The tool choice strategy. Default: 'auto'.

 @param system - A system message that will be part of the prompt.
 @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
 @param messages - A list of messages. You can either use `prompt` or `messages` but not both.

 @param maxTokens - Maximum number of tokens to generate.
 @param temperature - Temperature setting.
 The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
 @param topP - Nucleus sampling.
 The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
 @param topK - Only sample from the top K options for each subsequent token.
 Used to remove "long tail" low probability responses.
 Recommended for advanced use cases only. You usually only need to use temperature.
 @param presencePenalty - Presence penalty setting.
 It affects the likelihood of the model to repeat information that is already in the prompt.
 The value is passed through to the provider. The range depends on the provider and model.
 @param frequencyPenalty - Frequency penalty setting.
 It affects the likelihood of the model to repeatedly use the same words or phrases.
 The value is passed through to the provider. The range depends on the provider and model.
 @param stopSequences - Stop sequences.
 If set, the model will stop generating text when one of the stop sequences is generated.
 @param seed - The seed (integer) to use for random sampling.
 If set and supported by the model, calls will generate deterministic results.

 @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
 @param abortSignal - An optional abort signal that can be used to cancel the call.
 @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.

 @param maxSteps - Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.

 @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.

 @returns
 A result object that contains the generated text, the results of the tool calls, and additional information.
 */
declare function generateText<TOOLS extends Record<string, CoreTool>, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, maxSteps, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
    /**
     The language model to use.
     */
    model: LanguageModel;
    /**
     The tools that the model can call. The model needs to support calling tools.
     */
    tools?: TOOLS;
    /**
     The tool choice strategy. Default: 'auto'.
     */
    toolChoice?: CoreToolChoice<TOOLS>;
    /**
     Maximum number of sequential LLM calls (steps), e.g. when you use tool calls. Must be at least 1.

     A maximum number is required to prevent infinite loops in the case of misconfigured tools.

     By default, it's set to 1, which means that only a single LLM call is made.
     */
    maxSteps?: number;
    /**
     When enabled, the model will perform additional steps if the finish reason is "length" (experimental).

     By default, it's set to false.
     */
    experimental_continueSteps?: boolean;
    /**
     Optional telemetry configuration (experimental).
     */
    experimental_telemetry?: TelemetrySettings;
    /**
     Additional provider-specific metadata. They are passed through
     to the provider from the AI SDK and enable provider-specific
     functionality that can be fully encapsulated in the provider.
     */
    experimental_providerMetadata?: ProviderMetadata;
    /**
     Limits the tools that are available for the model to call without
     changing the tool call and result types in the result.
     */
    experimental_activeTools?: Array<keyof TOOLS>;
    /**
     Optional specification for parsing structured outputs from the LLM response.
     */
    experimental_output?: Output<OUTPUT, OUTPUT_PARTIAL>;
    /**
     A function that attempts to repair a tool call that failed to parse.
     */
    experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
    /**
     Callback that is called when each step (LLM call) is finished, including intermediate steps.
     */
    onStepFinish?: (event: StepResult<TOOLS>) => Promise<void> | void;
    /**
     * Internal. For test use only. May change without notice.
     */
    _internal?: {
        generateId?: () => string;
        currentDate?: () => Date;
    };
}): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
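
/*
 * Example (editor's illustration): a multi-step generateText call with one
 * tool. `model` and `weatherTool` are placeholders (see the `tool` example
 * above).
 *
 *   import { generateText } from 'ai';
 *
 *   const { text, steps, usage } = await generateText({
 *     model,
 *     tools: { weather: weatherTool },
 *     maxSteps: 3, // step 1: tool call, step 2: answer using the tool result
 *     prompt: 'What is the weather in Berlin?',
 *   });
 */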

/**
 * A stream wrapper to send custom JSON-encoded data back to the client.
 *
 * @deprecated Please use `createDataStream`, `createDataStreamResponse`, and `pipeDataStreamToResponse` instead.
 */
declare class StreamData {
    private encoder;
    private controller;
    stream: ReadableStream<Uint8Array>;
    private isClosed;
    private warningTimeout;
    constructor();
    close(): Promise<void>;
    append(value: JSONValue$1): void;
    appendMessageAnnotation(value: JSONValue$1): void;
}

/**
 A result object for accessing different stream types and additional information.
 */
interface StreamTextResult<TOOLS extends Record<string, CoreTool>, PARTIAL_OUTPUT> {
    /**
     Warnings from the model provider (e.g. unsupported settings) for the first step.
     */
    readonly warnings: Promise<CallWarning[] | undefined>;
    /**
     The total token usage of the generated response.
     When there are multiple steps, the usage is the sum of all step usages.

     Resolved when the response is finished.
     */
    readonly usage: Promise<LanguageModelUsage>;
    /**
     The reason why the generation finished. Taken from the last step.

     Resolved when the response is finished.
     */
    readonly finishReason: Promise<FinishReason>;
    /**
     Additional provider-specific metadata from the last step.
     Metadata is passed through from the provider to the AI SDK and
     enables provider-specific results that can be fully encapsulated in the provider.
     */
    readonly experimental_providerMetadata: Promise<ProviderMetadata | undefined>;
    /**
     The full text that has been generated by the last step.

     Resolved when the response is finished.
     */
    readonly text: Promise<string>;
    /**
     The tool calls that have been executed in the last step.

     Resolved when the response is finished.
     */
    readonly toolCalls: Promise<ToolCallUnion<TOOLS>[]>;
    /**
     The tool results that have been generated in the last step.

     Resolved when all tool executions are finished.
     */
    readonly toolResults: Promise<ToolResultUnion<TOOLS>[]>;
    /**
     Details for all steps.
     You can use this to get information about intermediate steps,
     such as the tool calls or the response headers.
     */
    readonly steps: Promise<Array<StepResult<TOOLS>>>;
    /**
     Additional request information from the last step.
     */
    readonly request: Promise<LanguageModelRequestMetadata>;
    /**
     Additional response information from the last step.
     */
    readonly response: Promise<LanguageModelResponseMetadata & {
        /**
         The response messages that were generated during the call. It consists of an assistant message,
         potentially containing tool calls.

         When there are tool results, there is an additional tool message with the tool results that are available.
         If there are tools that do not have execute functions, they are not included in the tool results and
         need to be added separately.
         */
        messages: Array<CoreAssistantMessage | CoreToolMessage>;
    }>;
    /**
     A text stream that returns only the generated text deltas. You can use it
     as either an AsyncIterable or a ReadableStream. When an error occurs, the
     stream will throw the error.
     */
    readonly textStream: AsyncIterableStream<string>;
    /**
     A stream with all events, including text deltas, tool calls, tool results, and
     errors.
     You can use it as either an AsyncIterable or a ReadableStream.
     Only errors that stop the stream, such as network errors, are thrown.
     */
    readonly fullStream: AsyncIterableStream<TextStreamPart<TOOLS>>;
    /**
     A stream of partial outputs. It uses the `experimental_output` specification.
     */
    readonly experimental_partialOutputStream: AsyncIterableStream<PARTIAL_OUTPUT>;
    /**
     Converts the result to a data stream.

     @param data an optional StreamData object that will be merged into the stream.
     @param getErrorMessage an optional function that converts an error to an error message.
     @param sendUsage whether to send the usage information to the client. Defaults to true.

     @return A data stream.
     */
    toDataStream(options?: {
        data?: StreamData;
        getErrorMessage?: (error: unknown) => string;
        sendUsage?: boolean;
    }): ReadableStream<Uint8Array>;
    /**
     * Merges the result as a data stream into another data stream.
     *
     * @param dataStream A data stream writer.
     */
    mergeIntoDataStream(dataStream: DataStreamWriter): void;
    /**
     Writes data stream output to a Node.js response-like object.

     @param response A Node.js response-like object (ServerResponse).
     @param options.status The status code.
     @param options.statusText The status text.
     @param options.headers The headers.
     @param options.data The stream data.
     @param options.getErrorMessage An optional function that converts an error to an error message.
     @param options.sendUsage Whether to send the usage information to the client. Defaults to true.
     */
    pipeDataStreamToResponse(response: ServerResponse, options?: ResponseInit & {
        data?: StreamData;
        getErrorMessage?: (error: unknown) => string;
        sendUsage?: boolean;
    }): void;
    /**
     Writes text delta output to a Node.js response-like object.
     It sets a `Content-Type` header to `text/plain; charset=utf-8` and
     writes each text delta as a separate chunk.

     @param response A Node.js response-like object (ServerResponse).
     @param init Optional headers, status code, and status text.
     */
    pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit): void;
    /**
     Converts the result to a streamed response object with a stream data part stream.
     It can be used with the `useChat` and `useCompletion` hooks.

     @param options.status The status code.
     @param options.statusText The status text.
     @param options.headers The headers.
     @param options.data The stream data.
     @param options.getErrorMessage An optional function that converts an error to an error message.
     @param options.sendUsage Whether to send the usage information to the client. Defaults to true.

     @return A response object.
     */
    toDataStreamResponse(options?: ResponseInit & {
        data?: StreamData;
        getErrorMessage?: (error: unknown) => string;
        sendUsage?: boolean;
    }): Response;
    /**
     Creates a simple text stream response.
     Each text delta is encoded as UTF-8 and sent as a separate chunk.
     Non-text-delta events are ignored.

     @param init Optional headers, status code, and status text.
     */
    toTextStreamResponse(init?: ResponseInit): Response;
}
type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
    type: 'text-delta';
    textDelta: string;
} | ({
    type: 'tool-call';
} & ToolCallUnion<TOOLS>) | {
    type: 'tool-call-streaming-start';
    toolCallId: string;
    toolName: string;
} | {
    type: 'tool-call-delta';
    toolCallId: string;
    toolName: string;
    argsTextDelta: string;
} | ({
    type: 'tool-result';
} & ToolResultUnion<TOOLS>) | {
    type: 'step-finish';
    finishReason: FinishReason;
    logprobs?: LogProbs;
    usage: LanguageModelUsage;
    request: LanguageModelRequestMetadata;
    response: LanguageModelResponseMetadata;
    warnings: CallWarning[] | undefined;
    experimental_providerMetadata?: ProviderMetadata;
    isContinued: boolean;
} | {
    type: 'finish';
    finishReason: FinishReason;
    logprobs?: LogProbs;
    usage: LanguageModelUsage;
    response: LanguageModelResponseMetadata;
    experimental_providerMetadata?: ProviderMetadata;
} | {
    type: 'error';
    error: unknown;
};

/**
 Generate a text and call tools for a given prompt using a language model.

 This function streams the output. If you do not want to stream the output, use `generateText` instead.

 @param model - The language model to use.
 @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.

 @param system - A system message that will be part of the prompt.
 @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
 @param messages - A list of messages. You can either use `prompt` or `messages` but not both.

 @param maxTokens - Maximum number of tokens to generate.
 @param temperature - Temperature setting.
 The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
 @param topP - Nucleus sampling.
 The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
 @param topK - Only sample from the top K options for each subsequent token.
 Used to remove "long tail" low probability responses.
 Recommended for advanced use cases only. You usually only need to use temperature.
 @param presencePenalty - Presence penalty setting.
 It affects the likelihood of the model to repeat information that is already in the prompt.
 The value is passed through to the provider. The range depends on the provider and model.
 @param frequencyPenalty - Frequency penalty setting.
 It affects the likelihood of the model to repeatedly use the same words or phrases.
 The value is passed through to the provider. The range depends on the provider and model.
 @param stopSequences - Stop sequences.
 If set, the model will stop generating text when one of the stop sequences is generated.
 @param seed - The seed (integer) to use for random sampling.
 If set and supported by the model, calls will generate deterministic results.

 @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
 @param abortSignal - An optional abort signal that can be used to cancel the call.
 @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.

 @param maxSteps - Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.

 @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
 @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
 @param onFinish - Callback that is called when the LLM response and all request tool executions
 (for tools that have an `execute` function) are finished.

 @return
 A result object for accessing different stream types and additional information.
 */
declare function streamText<TOOLS extends Record<string, CoreTool>, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxSteps, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
    /**
     The language model to use.
     */
    model: LanguageModel;
    /**
     The tools that the model can call. The model needs to support calling tools.
     */
    tools?: TOOLS;
    /**
     The tool choice strategy. Default: 'auto'.
     */
    toolChoice?: CoreToolChoice<TOOLS>;
    /**
     Maximum number of sequential LLM calls (steps), e.g. when you use tool calls. Must be at least 1.

     A maximum number is required to prevent infinite loops in the case of misconfigured tools.

     By default, it's set to 1, which means that only a single LLM call is made.
     */
    maxSteps?: number;
    /**
     When enabled, the model will perform additional steps if the finish reason is "length" (experimental).

     By default, it's set to false.
     */
    experimental_continueSteps?: boolean;
    /**
     Optional telemetry configuration (experimental).
     */
    experimental_telemetry?: TelemetrySettings;
    /**
     Additional provider-specific metadata. They are passed through
     to the provider from the AI SDK and enable provider-specific
     functionality that can be fully encapsulated in the provider.
     */
    experimental_providerMetadata?: ProviderMetadata;
    /**
     Limits the tools that are available for the model to call without
     changing the tool call and result types in the result.
     */
    experimental_activeTools?: Array<keyof TOOLS>;
    /**
     Optional specification for parsing structured outputs from the LLM response.
     */
    experimental_output?: Output<OUTPUT, PARTIAL_OUTPUT>;
    /**
     A function that attempts to repair a tool call that failed to parse.
     */
    experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
    /**
     Enable streaming of tool call deltas as they are generated. Disabled by default.
     */
    experimental_toolCallStreaming?: boolean;
    /**
     Optional transformation that is applied to the stream.

     @param stopStream - A function that stops the source stream.
     @param tools - The tools that are accessible to and can be called by the model. The model needs to support calling tools.
     */
    experimental_transform?: (options: {
        tools: TOOLS;
        stopStream: () => void;
    }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
    /**
     Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
     */
    onChunk?: (event: {
        chunk: Extract<TextStreamPart<TOOLS>, {
            type: 'text-delta' | 'tool-call' | 'tool-call-streaming-start' | 'tool-call-delta' | 'tool-result';
        }>;
    }) => Promise<void> | void;
    /**
     Callback that is called when the LLM response and all request tool executions
     (for tools that have an `execute` function) are finished.

     The usage is the combined usage of all steps.
     */
    onFinish?: (event: Omit<StepResult<TOOLS>, 'stepType' | 'isContinued'> & {
        /**
         Details for all steps.
         */
        readonly steps: StepResult<TOOLS>[];
    }) => Promise<void> | void;
    /**
     Callback that is called when each step (LLM call) is finished, including intermediate steps.
     */
    onStepFinish?: (event: StepResult<TOOLS>) => Promise<void> | void;
    /**
     * Internal. For test use only. May change without notice.
     */
    _internal?: {
        now?: () => number;
        generateId?: () => string;
        currentDate?: () => Date;
    };
}): StreamTextResult<TOOLS, PARTIAL_OUTPUT>;
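
/*
 * Example (editor's illustration): streaming text deltas to the console and
 * reading the final usage once the stream ends. `model` is a placeholder.
 *
 *   import { streamText } from 'ai';
 *
 *   const result = streamText({
 *     model,
 *     prompt: 'Write a haiku about the sea.',
 *   });
 *
 *   for await (const delta of result.textStream) {
 *     process.stdout.write(delta);
 *   }
 *   console.log(await result.usage);
 */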

/**
 * Smooths text streaming output.
 *
 * @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms. Can be set to `null` to skip the delay.
 * @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default), "line" to stream line by line, or provide a custom RegExp pattern for custom chunking.
 *
 * @returns A transform stream that smooths text streaming output.
 */
declare function smoothStream<TOOLS extends Record<string, CoreTool>>({ delayInMs, chunking, _internal: { delay }, }?: {
    delayInMs?: number | null;
    chunking?: 'word' | 'line' | RegExp;
    /**
     * Internal. For test use only. May change without notice.
     */
    _internal?: {
        delay?: (delayInMs: number | null) => Promise<void>;
    };
}): (options: {
    tools: TOOLS;
}) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
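
/*
 * Example (editor's illustration): plugging smoothStream into streamText via
 * experimental_transform so text arrives word by word with a small delay.
 *
 *   import { smoothStream, streamText } from 'ai';
 *
 *   const result = streamText({
 *     model,
 *     prompt: 'Tell me a short story.',
 *     experimental_transform: smoothStream({ delayInMs: 20, chunking: 'word' }),
 *   });
 */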

/**
 * Experimental middleware for LanguageModelV1.
 * This type defines the structure for middleware that can be used to modify
 * the behavior of LanguageModelV1 operations.
 */
type Experimental_LanguageModelV1Middleware = {
    /**
     * Transforms the parameters before they are passed to the language model.
     * @param options - Object containing the type of operation and the parameters.
     * @param options.type - The type of operation ('generate' or 'stream').
     * @param options.params - The original parameters for the language model call.
     * @returns A promise that resolves to the transformed parameters.
     */
    transformParams?: (options: {
        type: 'generate' | 'stream';
        params: LanguageModelV1CallOptions;
    }) => PromiseLike<LanguageModelV1CallOptions>;
    /**
     * Wraps the generate operation of the language model.
     * @param options - Object containing the generate function, parameters, and model.
     * @param options.doGenerate - The original generate function.
     * @param options.params - The parameters for the generate call. If the
     * `transformParams` middleware is used, this will be the transformed parameters.
     * @param options.model - The language model instance.
     * @returns A promise that resolves to the result of the generate operation.
     */
    wrapGenerate?: (options: {
        doGenerate: () => ReturnType<LanguageModelV1['doGenerate']>;
        params: LanguageModelV1CallOptions;
        model: LanguageModelV1;
    }) => Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
    /**
     * Wraps the stream operation of the language model.
     * @param options - Object containing the stream function, parameters, and model.
     * @param options.doStream - The original stream function.
     * @param options.params - The parameters for the stream call. If the
     * `transformParams` middleware is used, this will be the transformed parameters.
     * @param options.model - The language model instance.
     * @returns A promise that resolves to the result of the stream operation.
     */
    wrapStream?: (options: {
        doStream: () => ReturnType<LanguageModelV1['doStream']>;
        params: LanguageModelV1CallOptions;
        model: LanguageModelV1;
    }) => PromiseLike<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
};

/**
 * Wraps a LanguageModelV1 instance with middleware functionality.
 * This function allows you to apply middleware to transform parameters,
 * wrap generate operations, and wrap stream operations of a language model.
 *
 * @param options - Configuration options for wrapping the language model.
 * @param options.model - The original LanguageModelV1 instance to be wrapped.
 * @param options.middleware - The middleware to be applied to the language model.
 * @param options.modelId - Optional custom model ID to override the original model's ID.
 * @param options.providerId - Optional custom provider ID to override the original model's provider.
 * @returns A new LanguageModelV1 instance with middleware applied.
 */
declare const experimental_wrapLanguageModel: ({ model, middleware: { transformParams, wrapGenerate, wrapStream }, modelId, providerId, }: {
    model: LanguageModelV1;
    middleware: Experimental_LanguageModelV1Middleware;
    modelId?: string;
    providerId?: string;
}) => LanguageModelV1;
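
/*
 * Example (editor's illustration): simple logging middleware around a model.
 * `baseModel` is a placeholder for any LanguageModelV1 instance.
 *
 *   import { experimental_wrapLanguageModel } from 'ai';
 *
 *   const loggedModel = experimental_wrapLanguageModel({
 *     model: baseModel,
 *     middleware: {
 *       wrapGenerate: async ({ doGenerate, params }) => {
 *         console.log('doGenerate called with', params);
 *         const result = await doGenerate();
 *         console.log('doGenerate finished');
 *         return result;
 *       },
 *     },
 *   });
 */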

/**
 * Creates a custom provider with specified language models, text embedding models, and an optional fallback provider.
 *
 * @param {Object} options - The options for creating the custom provider.
 * @param {Record<string, LanguageModelV1>} [options.languageModels] - A record of language models, where keys are model IDs and values are LanguageModelV1 instances.
 * @param {Record<string, EmbeddingModelV1<string>>} [options.textEmbeddingModels] - A record of text embedding models, where keys are model IDs and values are EmbeddingModelV1<string> instances.
 * @param {Provider} [options.fallbackProvider] - An optional fallback provider to use when a requested model is not found in the custom provider.
 * @returns {Provider} A Provider object with languageModel and textEmbeddingModel methods.
 *
 * @throws {NoSuchModelError} Throws when a requested model is not found and no fallback provider is available.
 */
declare function experimental_customProvider({ languageModels, textEmbeddingModels, fallbackProvider, }: {
    languageModels?: Record<string, LanguageModelV1>;
    textEmbeddingModels?: Record<string, EmbeddingModelV1<string>>;
    fallbackProvider?: Provider;
}): Provider;
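
/*
 * Example (editor's illustration): aliasing model IDs behind a custom
 * provider. The `@ai-sdk/openai` import and model names are assumptions.
 *
 *   import { experimental_customProvider } from 'ai';
 *   import { openai } from '@ai-sdk/openai';
 *
 *   const myProvider = experimental_customProvider({
 *     languageModels: {
 *       fast: openai('gpt-4o-mini'),
 *       smart: openai('gpt-4o'),
 *     },
 *   });
 *
 *   const model = myProvider.languageModel('fast');
 */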

declare const symbol$a: unique symbol;
declare class NoSuchProviderError extends NoSuchModelError {
    private readonly [symbol$a];
    readonly providerId: string;
    readonly availableProviders: string[];
    constructor({ modelId, modelType, providerId, availableProviders, message, }: {
        modelId: string;
        modelType: 'languageModel' | 'textEmbeddingModel';
        providerId: string;
        availableProviders: string[];
        message?: string;
    });
    static isInstance(error: unknown): error is NoSuchProviderError;
}

/**
 * Creates a registry for the given providers.
 */
declare function experimental_createProviderRegistry(providers: Record<string, Provider>): Provider;
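
/*
 * Example (editor's sketch): looking up models across providers through a
 * registry. The provider imports and the 'providerId:modelId' lookup format
 * are assumptions based on the function's documented purpose.
 *
 *   import { experimental_createProviderRegistry } from 'ai';
 *   import { openai } from '@ai-sdk/openai';
 *   import { anthropic } from '@ai-sdk/anthropic';
 *
 *   const registry = experimental_createProviderRegistry({ openai, anthropic });
 *   const model = registry.languageModel('openai:gpt-4o-mini');
 */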

/**
 * Calculates the cosine similarity between two vectors. This is a useful metric for
 * comparing the similarity of two vectors such as embeddings.
 *
 * @param vector1 - The first vector.
 * @param vector2 - The second vector.
 *
 * @returns The cosine similarity between vector1 and vector2.
 * @throws {Error} If the vectors do not have the same length.
 */
declare function cosineSimilarity(vector1: number[], vector2: number[]): number;
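
/*
 * Example (editor's illustration): comparing two embeddings. `embedMany` is
 * exported by this package; the embedding model is a placeholder.
 *
 *   import { cosineSimilarity, embedMany } from 'ai';
 *
 *   const { embeddings } = await embedMany({
 *     model: embeddingModel,
 *     values: ['sunny day at the beach', 'rainy afternoon in the city'],
 *   });
 *
 *   console.log(cosineSimilarity(embeddings[0], embeddings[1]));
 */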

/**
 * Creates a ReadableStream that emits the provided values with an optional delay between each value.
 *
 * @param options - The configuration options
 * @param options.chunks - Array of values to be emitted by the stream
 * @param options.initialDelayInMs - Optional initial delay in milliseconds before emitting the first value (default: 0). Can be set to `null` to skip the initial delay. The difference between `initialDelayInMs: null` and `initialDelayInMs: 0` is that `initialDelayInMs: null` will emit the values without any delay, while `initialDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
 * @param options.chunkDelayInMs - Optional delay in milliseconds between emitting each value (default: 0). Can be set to `null` to skip the delay. The difference between `chunkDelayInMs: null` and `chunkDelayInMs: 0` is that `chunkDelayInMs: null` will emit the values without any delay, while `chunkDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
 * @returns A ReadableStream that emits the provided values
 */
declare function simulateReadableStream<T>({ chunks, initialDelayInMs, chunkDelayInMs, _internal, }: {
    chunks: T[];
    initialDelayInMs?: number | null;
    chunkDelayInMs?: number | null;
    _internal?: {
        delay?: (ms: number | null) => Promise<void>;
    };
}): ReadableStream<T>;
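
/*
 * Example (editor's illustration): emitting three chunks 50ms apart, e.g. to
 * feed a mock model or a streaming UI in tests.
 *
 *   import { simulateReadableStream } from 'ai';
 *
 *   const stream = simulateReadableStream({
 *     chunks: ['Hello', ' ', 'world'],
 *     chunkDelayInMs: 50,
 *   });
 */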

declare const symbol$9: unique symbol;
declare class InvalidArgumentError extends AISDKError {
    private readonly [symbol$9];
    readonly parameter: string;
    readonly value: unknown;
    constructor({ parameter, value, message, }: {
        parameter: string;
        value: unknown;
        message: string;
    });
    static isInstance(error: unknown): error is InvalidArgumentError;
}

declare const symbol$8: unique symbol;
/**
 Thrown when no object could be generated. This can have several causes:

 - The model failed to generate a response.
 - The model generated a response that could not be parsed.
 - The model generated a response that could not be validated against the schema.

 The error contains the following properties:

 - `text`: The text that was generated by the model. This can be the raw text or the tool call text, depending on the model.
 */
declare class NoObjectGeneratedError extends AISDKError {
    private readonly [symbol$8];
    /**
     The text that was generated by the model. This can be the raw text or the tool call text, depending on the model.
     */
    readonly text: string | undefined;
    /**
     The response metadata.
     */
    readonly response: LanguageModelResponseMetadata | undefined;
    /**
     The usage of the model.
     */
    readonly usage: LanguageModelUsage | undefined;
    constructor({ message, cause, text, response, usage, }: {
        message?: string;
        cause?: Error;
        text?: string;
        response: LanguageModelResponseMetadata;
        usage: LanguageModelUsage;
    });
    static isInstance(error: unknown): error is NoObjectGeneratedError;
}

declare const symbol$7: unique symbol;
/**
 Thrown when no output type is specified and output-related methods are called.
 */
declare class NoOutputSpecifiedError extends AISDKError {
    private readonly [symbol$7];
    constructor({ message }?: {
        message?: string;
    });
    static isInstance(error: unknown): error is NoOutputSpecifiedError;
}

declare const symbol$6: unique symbol;
declare class ToolCallRepairError extends AISDKError {
    private readonly [symbol$6];
    readonly originalError: NoSuchToolError | InvalidToolArgumentsError;
    constructor({ cause, originalError, message, }: {
        message?: string;
        cause: unknown;
        originalError: NoSuchToolError | InvalidToolArgumentsError;
    });
    static isInstance(error: unknown): error is ToolCallRepairError;
}

declare const symbol$5: unique symbol;
declare class ToolExecutionError extends AISDKError {
    private readonly [symbol$5];
    readonly toolName: string;
    readonly toolArgs: JSONValue;
    readonly toolCallId: string;
    constructor({ toolArgs, toolName, toolCallId, cause, message, }: {
        message?: string;
        toolArgs: JSONValue;
        toolName: string;
        toolCallId: string;
        cause: unknown;
    });
    static isInstance(error: unknown): error is ToolExecutionError;
}

declare const symbol$4: unique symbol;
declare class InvalidDataContentError extends AISDKError {
    private readonly [symbol$4];
    readonly content: unknown;
    constructor({ content, cause, message, }: {
        content: unknown;
        cause?: unknown;
        message?: string;
    });
    static isInstance(error: unknown): error is InvalidDataContentError;
}

declare const symbol$3: unique symbol;
declare class InvalidMessageRoleError extends AISDKError {
    private readonly [symbol$3];
    readonly role: string;
    constructor({ role, message, }: {
        role: string;
        message?: string;
    });
    static isInstance(error: unknown): error is InvalidMessageRoleError;
}

declare const symbol$2: unique symbol;
declare class MessageConversionError extends AISDKError {
    private readonly [symbol$2];
    readonly originalMessage: UIMessage;
    constructor({ originalMessage, message, }: {
        originalMessage: UIMessage;
        message: string;
    });
    static isInstance(error: unknown): error is MessageConversionError;
}

declare const symbol$1: unique symbol;
declare class DownloadError extends AISDKError {
    private readonly [symbol$1];
    readonly url: string;
    readonly statusCode?: number;
    readonly statusText?: string;
    constructor({ url, statusCode, statusText, cause, message, }: {
        url: string;
        statusCode?: number;
        statusText?: string;
        message?: string;
        cause?: unknown;
    });
    static isInstance(error: unknown): error is DownloadError;
}

declare const symbol: unique symbol;
type RetryErrorReason = 'maxRetriesExceeded' | 'errorNotRetryable' | 'abort';
declare class RetryError extends AISDKError {
    private readonly [symbol];
    readonly reason: RetryErrorReason;
    readonly lastError: unknown;
    readonly errors: Array<unknown>;
    constructor({ message, reason, errors, }: {
        message: string;
        reason: RetryErrorReason;
        errors: Array<unknown>;
    });
    static isInstance(error: unknown): error is RetryError;
}

/**
 You can pass the thread and the latest message into the `AssistantResponse`. This establishes the context for the response.
 */
type AssistantResponseSettings = {
    /**
     The thread ID that the response is associated with.
     */
    threadId: string;
    /**
     The ID of the latest message that the response is associated with.
     */
    messageId: string;
};
/**
 The process parameter is a callback in which you can run the assistant on threads, and send messages and data messages to the client.
 */
type AssistantResponseCallback = (options: {
    /**
     Forwards an assistant message (non-streaming) to the client.
     */
    sendMessage: (message: AssistantMessage) => void;
    /**
     Send a data message to the client. You can use this to provide information for rendering custom UIs while the assistant is processing the thread.
     */
    sendDataMessage: (message: DataMessage) => void;
    /**
     Forwards the assistant response stream to the client. Returns the `Run` object after it completes, or when it requires an action.
     */
    forwardStream: (stream: any) => Promise<any | undefined>;
}) => Promise<void>;
/**
 The `AssistantResponse` allows you to send a stream of assistant updates to `useAssistant`.
 It is designed to facilitate streaming assistant responses to the `useAssistant` hook.
 It receives an assistant thread and a current message, and can send messages and data messages to the client.
 */
declare function AssistantResponse({ threadId, messageId }: AssistantResponseSettings, process: AssistantResponseCallback): Response;
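
/*
 * Example (editor's sketch): using AssistantResponse with the OpenAI
 * Assistants API. The `openai` client calls reflect the openai npm package
 * (v4) and are assumptions here, as is the ASSISTANT_ID environment variable.
 *
 *   import { AssistantResponse } from 'ai';
 *   import OpenAI from 'openai';
 *
 *   const openai = new OpenAI();
 *
 *   export async function POST(req: Request) {
 *     const { threadId, message } = await req.json();
 *     const created = await openai.beta.threads.messages.create(threadId, {
 *       role: 'user',
 *       content: message,
 *     });
 *
 *     return AssistantResponse({ threadId, messageId: created.id }, async ({ forwardStream }) => {
 *       const runStream = openai.beta.threads.runs.stream(threadId, {
 *         assistant_id: process.env.ASSISTANT_ID!,
 *       });
 *       await forwardStream(runStream);
 *     });
 *   }
 */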
|
2523
|
+
|
2524
|
+
+/**
+ * Configuration options and helper callback methods for stream lifecycle events.
+ */
+interface StreamCallbacks {
+    /** `onStart`: Called once when the stream is initialized. */
+    onStart?: () => Promise<void> | void;
+    /**
+     * `onCompletion`: Called for each tokenized message.
+     *
+     * @deprecated Use `onFinal` instead.
+     */
+    onCompletion?: (completion: string) => Promise<void> | void;
+    /** `onFinal`: Called once when the stream is closed with the final completion message. */
+    onFinal?: (completion: string) => Promise<void> | void;
+    /** `onToken`: Called for each tokenized message. */
+    onToken?: (token: string) => Promise<void> | void;
+    /** `onText`: Called for each text chunk. */
+    onText?: (text: string) => Promise<void> | void;
+}
+
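Note that `StreamCallbacks` itself is not in the package's export list below, but the adapter functions that follow accept it structurally, so a plain object literal works. Using `onFinal` rather than the deprecated `onCompletion`:

```ts
// Accepted wherever the adapters below take `callbacks?: StreamCallbacks`.
const callbacks = {
  onStart: () => console.log('stream opened'),
  // Node-specific output; write each token as it arrives.
  onToken: (token: string) => process.stdout.write(token),
  onFinal: (completion: string) =>
    console.log(`\nstream closed, ${completion.length} characters total`),
};
```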
+type LangChainImageDetail = 'auto' | 'low' | 'high';
+type LangChainMessageContentText = {
+    type: 'text';
+    text: string;
+};
+type LangChainMessageContentImageUrl = {
+    type: 'image_url';
+    image_url: string | {
+        url: string;
+        detail?: LangChainImageDetail;
+    };
+};
+type LangChainMessageContentComplex = LangChainMessageContentText | LangChainMessageContentImageUrl | (Record<string, any> & {
+    type?: 'text' | 'image_url' | string;
+}) | (Record<string, any> & {
+    type?: never;
+});
+type LangChainMessageContent = string | LangChainMessageContentComplex[];
+type LangChainAIMessageChunk = {
+    content: LangChainMessageContent;
+};
+type LangChainStreamEvent = {
+    event: string;
+    data: any;
+};
+/**
+Converts LangChain output streams to an AI SDK Data Stream.
+
+The following streams are supported:
+- `LangChainAIMessageChunk` streams (LangChain `model.stream` output)
+- `string` streams (LangChain `StringOutputParser` output)
+ */
+declare function toDataStream$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, callbacks?: StreamCallbacks): ReadableStream<Uint8Array>;
+declare function toDataStreamResponse$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, options?: {
+    init?: ResponseInit;
+    data?: StreamData;
+    callbacks?: StreamCallbacks;
+}): Response;
+declare function mergeIntoDataStream$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, options: {
+    dataStream: DataStreamWriter;
+    callbacks?: StreamCallbacks;
+}): void;
+
+declare namespace langchainAdapter {
+  export {
+    mergeIntoDataStream$1 as mergeIntoDataStream,
+    toDataStream$1 as toDataStream,
+    toDataStreamResponse$1 as toDataStreamResponse,
+  };
+}
+
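A sketch of the namespace above (exported below as `LangChainAdapter`) in a route handler. `ChatOpenAI` from `@langchain/openai` and the model name are assumptions; `model.stream` yields `LangChainAIMessageChunk` values, one of the supported stream shapes listed in the doc comment:

```ts
import { LangChainAdapter } from 'ai';
import { ChatOpenAI } from '@langchain/openai'; // assumed LangChain setup

export async function POST(req: Request) {
  const { prompt } = await req.json();

  const model = new ChatOpenAI({ model: 'gpt-4o-mini' });
  const stream = await model.stream(prompt); // ReadableStream of LangChainAIMessageChunk

  // Converts the LangChain stream into an AI SDK data stream response.
  return LangChainAdapter.toDataStreamResponse(stream, {
    callbacks: {
      onFinal: completion => console.log('done:', completion.length),
    },
  });
}
```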
+type EngineResponse = {
+    delta: string;
+};
+declare function toDataStream(stream: AsyncIterable<EngineResponse>, callbacks?: StreamCallbacks): ReadableStream<Uint8Array>;
+declare function toDataStreamResponse(stream: AsyncIterable<EngineResponse>, options?: {
+    init?: ResponseInit;
+    data?: StreamData;
+    callbacks?: StreamCallbacks;
+}): Response;
+declare function mergeIntoDataStream(stream: AsyncIterable<EngineResponse>, options: {
+    dataStream: DataStreamWriter;
+    callbacks?: StreamCallbacks;
+}): void;
+
+declare const llamaindexAdapter_mergeIntoDataStream: typeof mergeIntoDataStream;
+declare const llamaindexAdapter_toDataStream: typeof toDataStream;
+declare const llamaindexAdapter_toDataStreamResponse: typeof toDataStreamResponse;
+declare namespace llamaindexAdapter {
+  export {
+    llamaindexAdapter_mergeIntoDataStream as mergeIntoDataStream,
+    llamaindexAdapter_toDataStream as toDataStream,
+    llamaindexAdapter_toDataStreamResponse as toDataStreamResponse,
+  };
+}
+
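The namespace above (exported below as `LlamaIndexAdapter`) mirrors the LangChain adapter but consumes `AsyncIterable<EngineResponse>`, i.e. chunks carrying a `delta` string, which is what LlamaIndex chat engines emit when streaming. A sketch assuming the `llamaindex` package and its `SimpleChatEngine`:

```ts
import { LlamaIndexAdapter } from 'ai';
import { OpenAI, SimpleChatEngine } from 'llamaindex'; // assumed LlamaIndex setup

export async function POST(req: Request) {
  const { prompt } = await req.json();

  const chatEngine = new SimpleChatEngine({
    llm: new OpenAI({ model: 'gpt-4o-mini' }),
  });
  // With stream: true the engine returns an AsyncIterable of { delta: string } chunks.
  const stream = await chatEngine.chat({ message: prompt, stream: true });

  return LlamaIndexAdapter.toDataStreamResponse(stream);
}
```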
+export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createDataStream, createDataStreamResponse, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, smoothStream, streamObject, streamText, tool };