ai 0.0.0-85f9a635-20240518005312 → 0.0.0-8777c42a-20250115032312

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. package/CHANGELOG.md +2863 -0
  2. package/README.md +99 -22
  3. package/dist/index.d.mts +1925 -1592
  4. package/dist/index.d.ts +1925 -1592
  5. package/dist/index.js +5500 -2961
  6. package/dist/index.js.map +1 -1
  7. package/dist/index.mjs +5497 -2916
  8. package/dist/index.mjs.map +1 -1
  9. package/package.json +39 -100
  10. package/react/dist/index.d.mts +8 -563
  11. package/react/dist/index.d.ts +8 -580
  12. package/react/dist/index.js +7 -1395
  13. package/react/dist/index.js.map +1 -1
  14. package/react/dist/index.mjs +12 -1383
  15. package/react/dist/index.mjs.map +1 -1
  16. package/rsc/dist/index.d.ts +340 -197
  17. package/rsc/dist/rsc-server.d.mts +339 -197
  18. package/rsc/dist/rsc-server.mjs +1295 -1347
  19. package/rsc/dist/rsc-server.mjs.map +1 -1
  20. package/rsc/dist/rsc-shared.d.mts +30 -23
  21. package/rsc/dist/rsc-shared.mjs +69 -105
  22. package/rsc/dist/rsc-shared.mjs.map +1 -1
  23. package/test/dist/index.d.mts +67 -0
  24. package/test/dist/index.d.ts +67 -0
  25. package/test/dist/index.js +131 -0
  26. package/test/dist/index.js.map +1 -0
  27. package/test/dist/index.mjs +101 -0
  28. package/test/dist/index.mjs.map +1 -0
  29. package/prompts/dist/index.d.mts +0 -324
  30. package/prompts/dist/index.d.ts +0 -324
  31. package/prompts/dist/index.js +0 -178
  32. package/prompts/dist/index.js.map +0 -1
  33. package/prompts/dist/index.mjs +0 -146
  34. package/prompts/dist/index.mjs.map +0 -1
  35. package/react/dist/index.server.d.mts +0 -17
  36. package/react/dist/index.server.d.ts +0 -17
  37. package/react/dist/index.server.js +0 -50
  38. package/react/dist/index.server.js.map +0 -1
  39. package/react/dist/index.server.mjs +0 -23
  40. package/react/dist/index.server.mjs.map +0 -1
  41. package/solid/dist/index.d.mts +0 -408
  42. package/solid/dist/index.d.ts +0 -408
  43. package/solid/dist/index.js +0 -1072
  44. package/solid/dist/index.js.map +0 -1
  45. package/solid/dist/index.mjs +0 -1044
  46. package/solid/dist/index.mjs.map +0 -1
  47. package/svelte/dist/index.d.mts +0 -484
  48. package/svelte/dist/index.d.ts +0 -484
  49. package/svelte/dist/index.js +0 -1778
  50. package/svelte/dist/index.js.map +0 -1
  51. package/svelte/dist/index.mjs +0 -1749
  52. package/svelte/dist/index.mjs.map +0 -1
  53. package/vue/dist/index.d.mts +0 -402
  54. package/vue/dist/index.d.ts +0 -402
  55. package/vue/dist/index.js +0 -1072
  56. package/vue/dist/index.js.map +0 -1
  57. package/vue/dist/index.mjs +0 -1034
  58. package/vue/dist/index.mjs.map +0 -1
package/dist/index.d.mts CHANGED
@@ -1,9 +1,84 @@
- import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning } from '@ai-sdk/provider';
- export { APICallError, EmptyResponseBodyError, InvalidArgumentError, InvalidDataContentError, InvalidPromptError, InvalidResponseDataError, InvalidToolArgumentsError, JSONParseError, LoadAPIKeyError, NoObjectGeneratedError, NoSuchToolError, RetryError, ToolCallParseError, TypeValidationError, UnsupportedFunctionalityError, UnsupportedJSONSchemaError } from '@ai-sdk/provider';
- import { z } from 'zod';
+ import { DataStreamString, ToolInvocation, Attachment, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
+ export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream } from '@ai-sdk/ui-utils';
+ export { ToolCall as CoreToolCall, ToolResult as CoreToolResult, generateId } from '@ai-sdk/provider-utils';
+ import { JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, NoSuchModelError } from '@ai-sdk/provider';
+ export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt, LanguageModelV1StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
  import { ServerResponse } from 'node:http';
- import { AssistantStream } from 'openai/lib/AssistantStream';
- import { Run } from 'openai/resources/beta/threads/runs/runs';
+ import { AttributeValue, Tracer } from '@opentelemetry/api';
+ import { z } from 'zod';
+ import { ServerResponse as ServerResponse$1 } from 'http';
+
+ interface DataStreamWriter {
+ /**
+ * Appends a data part to the stream.
+ */
+ writeData(value: JSONValue): void;
+ /**
+ * Appends a message annotation to the stream.
+ */
+ writeMessageAnnotation(value: JSONValue): void;
+ /**
+ * Merges the contents of another stream to this stream.
+ */
+ merge(stream: ReadableStream<DataStreamString>): void;
+ /**
+ * Error handler that is used by the data stream writer.
+ * This is intended for forwarding when merging streams
+ * to prevent duplicated error masking.
+ */
+ onError: ((error: unknown) => string) | undefined;
+ }
+
+ declare function createDataStream({ execute, onError, }: {
+ execute: (dataStream: DataStreamWriter) => Promise<void> | void;
+ onError?: (error: unknown) => string;
+ }): ReadableStream<DataStreamString>;
+
+ declare function createDataStreamResponse({ status, statusText, headers, execute, onError, }: ResponseInit & {
+ execute: (dataStream: DataStreamWriter) => Promise<void> | void;
+ onError?: (error: unknown) => string;
+ }): Response;
+
+ declare function pipeDataStreamToResponse(response: ServerResponse, { status, statusText, headers, execute, onError, }: ResponseInit & {
+ execute: (writer: DataStreamWriter) => Promise<void> | void;
+ onError?: (error: unknown) => string;
+ }): void;
+
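The data stream API above is new in this version. Here is a minimal usage sketch based only on the declared signatures; the route-handler framing is illustrative and not part of the declarations:

```ts
import { createDataStreamResponse } from 'ai';

// Illustrative HTTP handler that returns a streamed Response built from
// custom data parts. Only createDataStreamResponse and the DataStreamWriter
// methods come from the declarations above.
export function GET(): Response {
  return createDataStreamResponse({
    status: 200,
    execute: dataStream => {
      // Append custom data parts and message annotations to the stream.
      dataStream.writeData({ step: 'started' });
      dataStream.writeMessageAnnotation({ source: 'example' });
    },
    // Mask internal errors before they are sent to the client.
    onError: () => 'An error occurred.',
  });
}
```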
+ /**
+ * Telemetry configuration.
+ */
+ type TelemetrySettings = {
+ /**
+ * Enable or disable telemetry. Disabled by default while experimental.
+ */
+ isEnabled?: boolean;
+ /**
+ * Enable or disable input recording. Enabled by default.
+ *
+ * You might want to disable input recording to avoid recording sensitive
+ * information, to reduce data transfers, or to increase performance.
+ */
+ recordInputs?: boolean;
+ /**
+ * Enable or disable output recording. Enabled by default.
+ *
+ * You might want to disable output recording to avoid recording sensitive
+ * information, to reduce data transfers, or to increase performance.
+ */
+ recordOutputs?: boolean;
+ /**
+ * Identifier for this function. Used to group telemetry data by function.
+ */
+ functionId?: string;
+ /**
+ * Additional information to include in the telemetry data.
+ */
+ metadata?: Record<string, AttributeValue>;
+ /**
+ * A custom tracer to use for the telemetry data.
+ */
+ tracer?: Tracer;
+ };
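A sketch of a `TelemetrySettings` value as declared above (the `functionId` and `metadata` values are made up, and whether the type name itself is re-exported by the package is not shown in this excerpt):

```ts
import { trace } from '@opentelemetry/api';

// Hypothetical telemetry configuration for an AI SDK call.
const telemetry: TelemetrySettings = {
  isEnabled: true,             // telemetry is disabled by default while experimental
  recordInputs: false,         // avoid recording sensitive prompt data
  functionId: 'summarize-article',
  metadata: { environment: 'staging' },
  tracer: trace.getTracer('ai'),
};
```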
 
  /**
  Embedding model that is used by the AI SDK Core functions.
@@ -14,6 +89,16 @@ Embedding.
  */
  type Embedding = EmbeddingModelV1Embedding;
 
+ /**
+ Image model that is used by the AI SDK Core functions.
+ */
+ type ImageModel = ImageModelV1;
+ /**
+ Warning from the model provider for this call. The call will proceed, but e.g.
+ some settings might not be supported, which can lead to suboptimal results.
+ */
+ type ImageGenerationWarning = ImageModelV1CallWarning;
+
  /**
  Language model that is used by the AI SDK Core functions.
  */
@@ -32,6 +117,8 @@ Can be one of the following:
  type FinishReason = LanguageModelV1FinishReason;
  /**
  Log probabilities for each token and its top log probabilities.
+
+ @deprecated Will become a provider extension in the future.
  */
  type LogProbs = LanguageModelV1LogProbs;
  /**
@@ -39,6 +126,134 @@ Warning from the model provider for this call. The call will proceed, but e.g.
  some settings might not be supported, which can lead to suboptimal results.
  */
  type CallWarning = LanguageModelV1CallWarning;
+ /**
+ Tool choice for the generation. It supports the following settings:
+
+ - `auto` (default): the model can choose whether and which tools to call.
+ - `required`: the model must call a tool. It can choose which tool to call.
+ - `none`: the model must not call tools
+ - `{ type: 'tool', toolName: string (typed) }`: the model must call the specified tool
+ */
+ type CoreToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'required' | {
+ type: 'tool';
+ toolName: keyof TOOLS;
+ };
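A short sketch of how the typed tool choice constrains `toolName` (the tool set is hypothetical):

```ts
// With a typed tool set, toolName is restricted to the declared keys.
type MyTools = { getWeather: unknown; getTime: unknown };

const auto: CoreToolChoice<MyTools> = 'auto';
const forced: CoreToolChoice<MyTools> = { type: 'tool', toolName: 'getWeather' };
// { type: 'tool', toolName: 'getStock' } would be a compile-time error.
```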
+
+ type LanguageModelRequestMetadata = {
+ /**
+ Raw request HTTP body that was sent to the provider API as a string (JSON should be stringified).
+ */
+ body?: string;
+ };
+
+ type LanguageModelResponseMetadata = {
+ /**
+ ID for the generated response.
+ */
+ id: string;
+ /**
+ Timestamp for the start of the generated response.
+ */
+ timestamp: Date;
+ /**
+ The ID of the response model that was used to generate the response.
+ */
+ modelId: string;
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+
+ /**
+ * Provider for language and text embedding models.
+ */
+ type Provider = {
+ /**
+ Returns the language model with the given id.
+ The model id is then passed to the provider function to get the model.
+
+ @param {string} id - The id of the model to return.
+
+ @returns {LanguageModel} The language model associated with the id
+
+ @throws {NoSuchModelError} If no such model exists.
+ */
+ languageModel(modelId: string): LanguageModel;
+ /**
+ Returns the text embedding model with the given id.
+ The model id is then passed to the provider function to get the model.
+
+ @param {string} id - The id of the model to return.
+
+ @returns {LanguageModel} The language model associated with the id
+
+ @throws {NoSuchModelError} If no such model exists.
+ */
+ textEmbeddingModel(modelId: string): EmbeddingModel<string>;
+ };
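A sketch of a custom `Provider` backed by simple lookup maps. The `models` and `embedders` registries are hypothetical and would be filled elsewhere; only the `Provider` shape comes from the declaration above (the docs say lookups should throw `NoSuchModelError`; a plain `Error` is used here to avoid assuming its constructor shape):

```ts
const models = new Map<string, LanguageModel>();
const embedders = new Map<string, EmbeddingModel<string>>();

const myProvider: Provider = {
  languageModel(modelId) {
    const model = models.get(modelId);
    if (model == null) throw new Error(`No such language model: ${modelId}`);
    return model;
  },
  textEmbeddingModel(modelId) {
    const embedder = embedders.get(modelId);
    if (embedder == null) throw new Error(`No such embedding model: ${modelId}`);
    return embedder;
  },
};
```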
+
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ type ProviderMetadata = LanguageModelV1ProviderMetadata;
+
+ /**
+ Represents the number of tokens used in a prompt and completion.
+ */
+ type LanguageModelUsage = {
+ /**
+ The number of tokens used in the prompt.
+ */
+ promptTokens: number;
+ /**
+ The number of tokens used in the completion.
+ */
+ completionTokens: number;
+ /**
+ The total number of tokens used (promptTokens + completionTokens).
+ */
+ totalTokens: number;
+ };
+ /**
+ Represents the number of tokens used in an embedding.
+ */
+ type EmbeddingModelUsage = {
+ /**
+ The number of tokens used in the embedding.
+ */
+ tokens: number;
+ };
+
+ /**
+ The result of an `embed` call.
+ It contains the embedding, the value, and additional information.
+ */
+ interface EmbedResult<VALUE> {
+ /**
+ The value that was embedded.
+ */
+ readonly value: VALUE;
+ /**
+ The embedding of the value.
+ */
+ readonly embedding: Embedding;
+ /**
+ The embedding token usage.
+ */
+ readonly usage: EmbeddingModelUsage;
+ /**
+ Optional raw response data.
+ */
+ readonly rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ }
 
  /**
  Embed a value using an embedding model. The type of the value is defined by the embedding model.
@@ -48,10 +263,11 @@ Embed a value using an embedding model. The type of the value is defined by the
 
  @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
  @param abortSignal - An optional abort signal that can be used to cancel the call.
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
 
  @returns A result object that contains the embedding, the value, and additional information.
  */
- declare function embed<VALUE>({ model, value, maxRetries, abortSignal, }: {
+ declare function embed<VALUE>({ model, value, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
  /**
  The embedding model to use.
  */
@@ -70,36 +286,34 @@ declare function embed<VALUE>({ model, value, maxRetries, abortSignal, }: {
  Abort signal.
  */
  abortSignal?: AbortSignal;
+ /**
+ Additional headers to include in the request.
+ Only applicable for HTTP-based providers.
+ */
+ headers?: Record<string, string>;
+ /**
+ * Optional telemetry configuration (experimental).
+ */
+ experimental_telemetry?: TelemetrySettings;
  }): Promise<EmbedResult<VALUE>>;
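A usage sketch for `embed` with the new `headers` and `experimental_telemetry` options; `myEmbeddingModel` is a placeholder for any concrete `EmbeddingModel<string>` obtained from a provider package:

```ts
import { embed } from 'ai';

// Embed a single value; the result carries the embedding and token usage.
const { embedding, usage } = await embed({
  model: myEmbeddingModel, // placeholder embedding model
  value: 'sunny day at the beach',
  maxRetries: 2,
  headers: { 'x-request-source': 'docs-example' }, // HTTP providers only
  experimental_telemetry: { isEnabled: true, functionId: 'embed-example' },
});

console.log(embedding.length, usage.tokens);
```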
+
  /**
- The result of a `embed` call.
- It contains the embedding, the value, and additional information.
+ The result of a `embedMany` call.
+ It contains the embeddings, the values, and additional information.
  */
- declare class EmbedResult<VALUE> {
+ interface EmbedManyResult<VALUE> {
  /**
- The value that was embedded.
- */
- readonly value: VALUE;
+ The values that were embedded.
+ */
+ readonly values: Array<VALUE>;
  /**
- The embedding of the value.
- */
- readonly embedding: Embedding;
+ The embeddings. They are in the same order as the values.
+ */
+ readonly embeddings: Array<Embedding>;
  /**
- Optional raw response data.
- */
- readonly rawResponse?: {
- /**
- Response headers.
- */
- headers?: Record<string, string>;
- };
- constructor(options: {
- value: VALUE;
- embedding: Embedding;
- rawResponse?: {
- headers?: Record<string, string>;
- };
- });
+ The embedding token usage.
+ */
+ readonly usage: EmbeddingModelUsage;
  }
 
  /**
@@ -114,10 +328,11 @@ has a limit on how many embeddings can be generated in a single call.
 
  @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
  @param abortSignal - An optional abort signal that can be used to cancel the call.
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
 
  @returns A result object that contains the embeddings, the value, and additional information.
  */
- declare function embedMany<VALUE>({ model, values, maxRetries, abortSignal, }: {
+ declare function embedMany<VALUE>({ model, values, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
  /**
  The embedding model to use.
  */
@@ -136,31 +351,119 @@ declare function embedMany<VALUE>({ model, values, maxRetries, abortSignal, }: {
  Abort signal.
  */
  abortSignal?: AbortSignal;
+ /**
+ Additional headers to include in the request.
+ Only applicable for HTTP-based providers.
+ */
+ headers?: Record<string, string>;
+ /**
+ * Optional telemetry configuration (experimental).
+ */
+ experimental_telemetry?: TelemetrySettings;
  }): Promise<EmbedManyResult<VALUE>>;
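A batch sketch for `embedMany`; again `myEmbeddingModel` is a placeholder:

```ts
import { embedMany } from 'ai';

// Batch-embed several values; embeddings come back in input order.
const { embeddings, values, usage } = await embedMany({
  model: myEmbeddingModel, // placeholder embedding model
  values: ['sunny day at the beach', 'rainy afternoon in the city'],
});

// embeddings[i] corresponds to values[i].
console.log(embeddings.length === values.length, usage.tokens);
```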
+
  /**
- The result of a `embedMany` call.
- It contains the embeddings, the values, and additional information.
+ The result of a `generateImage` call.
+ It contains the images and additional information.
  */
- declare class EmbedManyResult<VALUE> {
+ interface GenerateImageResult {
  /**
- The values that were embedded.
+ The first image that was generated.
  */
- readonly values: Array<VALUE>;
+ readonly image: GeneratedImage;
  /**
- The embeddings. They are in the same order as the values.
- */
- readonly embeddings: Array<Embedding>;
- constructor(options: {
- values: Array<VALUE>;
- embeddings: Array<Embedding>;
- });
+ The images that were generated.
+ */
+ readonly images: Array<GeneratedImage>;
+ /**
+ Warnings for the call, e.g. unsupported settings.
+ */
+ readonly warnings: Array<ImageGenerationWarning>;
+ }
+ interface GeneratedImage {
+ /**
+ Image as a base64 encoded string.
+ */
+ readonly base64: string;
+ /**
+ Image as a Uint8Array.
+ */
+ readonly uint8Array: Uint8Array;
  }
 
- type TokenUsage = {
- promptTokens: number;
- completionTokens: number;
- totalTokens: number;
- };
+ /**
+ Generates images using an image model.
+
+ @param model - The image model to use.
+ @param prompt - The prompt that should be used to generate the image.
+ @param n - Number of images to generate. Default: 1.
+ @param size - Size of the images to generate. Must have the format `{width}x{height}`.
+ @param aspectRatio - Aspect ratio of the images to generate. Must have the format `{width}:{height}`.
+ @param seed - Seed for the image generation.
+ @param providerOptions - Additional provider-specific options that are passed through to the provider
+ as body parameters.
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
+
+ @returns A result object that contains the generated images.
+ */
+ declare function generateImage({ model, prompt, n, size, aspectRatio, seed, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
+ /**
+ The image model to use.
+ */
+ model: ImageModelV1;
+ /**
+ The prompt that should be used to generate the image.
+ */
+ prompt: string;
+ /**
+ Number of images to generate.
+ */
+ n?: number;
+ /**
+ Size of the images to generate. Must have the format `{width}x{height}`. If not provided, the default size will be used.
+ */
+ size?: `${number}x${number}`;
+ /**
+ Aspect ratio of the images to generate. Must have the format `{width}:{height}`. If not provided, the default aspect ratio will be used.
+ */
+ aspectRatio?: `${number}:${number}`;
+ /**
+ Seed for the image generation. If not provided, the default seed will be used.
+ */
+ seed?: number;
+ /**
+ Additional provider-specific options that are passed through to the provider
+ as body parameters.
+
+ The outer record is keyed by the provider name, and the inner
+ record is keyed by the provider-specific metadata key.
+ ```ts
+ {
+ "openai": {
+ "style": "vivid"
+ }
+ }
+ ```
+ */
+ providerOptions?: Record<string, Record<string, JSONValue>>;
+ /**
+ Maximum number of retries per embedding model call. Set to 0 to disable retries.
+
+ @default 2
+ */
+ maxRetries?: number;
+ /**
+ Abort signal.
+ */
+ abortSignal?: AbortSignal;
+ /**
+ Additional headers to include in the request.
+ Only applicable for HTTP-based providers.
+ */
+ headers?: Record<string, string>;
+ }): Promise<GenerateImageResult>;
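A usage sketch for the declared `generateImage` function. Note that the name under which the package exports it is not shown in this excerpt (it may carry an `experimental_` prefix); `myImageModel` is a placeholder for any `ImageModelV1` implementation:

```ts
const { image, images, warnings } = await generateImage({
  model: myImageModel, // placeholder image model
  prompt: 'A watercolor painting of a lighthouse at dawn',
  n: 2,                // generate two candidates
  size: '1024x1024',   // must match the `${number}x${number}` template type
  seed: 42,
  providerOptions: { openai: { style: 'vivid' } },
});

// `image` is the first generated image; all candidates are in `images`.
console.log(image.base64.slice(0, 32), images.length, warnings);
```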
 
  type CallSettings = {
  /**
@@ -186,13 +489,18 @@ type CallSettings = {
  */
  topP?: number;
  /**
+ Only sample from the top K options for each subsequent token.
+
+ Used to remove "long tail" low probability responses.
+ Recommended for advanced use cases only. You usually only need to use temperature.
+ */
+ topK?: number;
+ /**
  Presence penalty setting. It affects the likelihood of the model to
  repeat information that is already in the prompt.
 
  The presence penalty is a number between -1 (increase repetition)
  and 1 (maximum penalty, decrease repetition). 0 means no penalty.
-
- @default 0
  */
  presencePenalty?: number;
  /**
@@ -201,11 +509,15 @@ type CallSettings = {
 
  The frequency penalty is a number between -1 (increase repetition)
  and 1 (maximum penalty, decrease repetition). 0 means no penalty.
-
- @default 0
  */
  frequencyPenalty?: number;
  /**
+ Stop sequences.
+ If set, the model will stop generating text when one of the stop sequences is generated.
+ Providers may have limits on the number of stop sequences.
+ */
+ stopSequences?: string[];
+ /**
  The seed (integer) to use for random sampling. If set and supported
  by the model, calls will generate deterministic results.
  */
@@ -220,36 +532,42 @@ type CallSettings = {
  Abort signal.
  */
  abortSignal?: AbortSignal;
+ /**
+ Additional HTTP headers to be sent with the request.
+ Only applicable for HTTP-based providers.
+ */
+ headers?: Record<string, string | undefined>;
  };
 
  /**
  Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
  */
  type DataContent = string | Uint8Array | ArrayBuffer | Buffer;
- /**
- Converts data content to a base64-encoded string.
-
- @param content - Data content to convert.
- @returns Base64-encoded string.
- */
- declare function convertDataContentToBase64String(content: DataContent): string;
- /**
- Converts data content to a Uint8Array.
 
- @param content - Data content to convert.
- @returns Uint8Array.
- */
- declare function convertDataContentToUint8Array(content: DataContent): Uint8Array;
+ type ToolResultContent = Array<{
+ type: 'text';
+ text: string;
+ } | {
+ type: 'image';
+ data: string;
+ mimeType?: string;
+ }>;
 
  /**
  Text content part of a prompt. It contains a string of text.
  */
- interface TextPart$1 {
+ interface TextPart {
  type: 'text';
  /**
  The text content.
  */
  text: string;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
  }
  /**
  Image content part of a prompt. It contains an image.
@@ -267,6 +585,35 @@ interface ImagePart {
  Optional mime type of the image.
  */
  mimeType?: string;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ }
+ /**
+ File content part of a prompt. It contains a file.
+ */
+ interface FilePart {
+ type: 'file';
+ /**
+ File data. Can either be:
+
+ - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
+ - URL: a URL that points to the image
+ */
+ data: DataContent | URL;
+ /**
+ Mime type of the file.
+ */
+ mimeType: string;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
  }
  /**
  Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
@@ -285,6 +632,12 @@ interface ToolCallPart {
  Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
  */
  args: unknown;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
  }
  /**
  Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
@@ -304,16 +657,21 @@ interface ToolResultPart {
  */
  result: unknown;
  /**
+ Multi-part content of the tool result. Only for tools that support multipart results.
+ */
+ experimental_content?: ToolResultContent;
+ /**
  Optional flag if the result is an error or an error message.
  */
  isError?: boolean;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
  }
 
- /**
- A message that can be used in the `messages` field of a prompt.
- It can be a user message, an assistant message, or a tool message.
- */
- type CoreMessage = CoreSystemMessage | CoreUserMessage | CoreAssistantMessage | CoreToolMessage;
  /**
  A system message. It can contain system information.
 
@@ -324,59 +682,80 @@ type CoreMessage = CoreSystemMessage | CoreUserMessage | CoreAssistantMessage |
  type CoreSystemMessage = {
  role: 'system';
  content: string;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
  };
- /**
- * @deprecated Use `CoreMessage` instead.
- */
- type ExperimentalMessage = CoreMessage;
  /**
  A user message. It can contain text or a combination of text and images.
  */
  type CoreUserMessage = {
  role: 'user';
  content: UserContent;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
  };
- /**
- * @deprecated Use `CoreUserMessage` instead.
- */
- type ExperimentalUserMessage = CoreUserMessage;
  /**
  Content of a user message. It can be a string or an array of text and image parts.
  */
- type UserContent = string | Array<TextPart$1 | ImagePart>;
+ type UserContent = string | Array<TextPart | ImagePart | FilePart>;
  /**
  An assistant message. It can contain text, tool calls, or a combination of text and tool calls.
  */
  type CoreAssistantMessage = {
  role: 'assistant';
  content: AssistantContent;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
  };
- /**
- * @deprecated Use `CoreAssistantMessage` instead.
- */
- type ExperimentalAssistantMessage = CoreAssistantMessage;
  /**
  Content of an assistant message. It can be a string or an array of text and tool call parts.
  */
- type AssistantContent = string | Array<TextPart$1 | ToolCallPart>;
+ type AssistantContent = string | Array<TextPart | ToolCallPart>;
  /**
  A tool message. It contains the result of one or more tool calls.
  */
  type CoreToolMessage = {
  role: 'tool';
  content: ToolContent;
- };
- /**
- * @deprecated Use `CoreToolMessage` instead.
- */
- type ExperimentalToolMessage = CoreToolMessage;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ };
  /**
  Content of a tool message. It is an array of tool result parts.
  */
  type ToolContent = Array<ToolResultPart>;
+ /**
+ A message that can be used in the `messages` field of a prompt.
+ It can be a user message, an assistant message, or a tool message.
+ */
+ type CoreMessage = CoreSystemMessage | CoreUserMessage | CoreAssistantMessage | CoreToolMessage;
+
+ type UIMessage = {
+ role: 'system' | 'user' | 'assistant' | 'data';
+ content: string;
+ toolInvocations?: ToolInvocation[];
+ experimental_attachments?: Attachment[];
+ };
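A sketch of a `CoreMessage` conversation built from the message and part types above. The image URL is illustrative, and the `image` field name on the image part is assumed from the SDK's part interfaces (the full `ImagePart` declaration is only partially visible in this diff):

```ts
const messages: Array<CoreMessage> = [
  { role: 'system', content: 'You are a helpful assistant.' },
  {
    role: 'user',
    content: [
      { type: 'text', text: 'What is shown in this image?' },
      { type: 'image', image: new URL('https://example.com/photo.jpg') },
    ],
  },
];
```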
 
  /**
- Prompt part of the AI function options. It contains a system message, a simple text prompt, or a list of messages.
+ Prompt part of the AI function options.
+ It contains a system message, a simple text prompt, or a list of messages.
  */
  type Prompt = {
  /**
@@ -388,291 +767,650 @@ type Prompt = {
  */
  prompt?: string;
  /**
- A list of messsages. You can either use `prompt` or `messages` but not both.
+ A list of messages. You can either use `prompt` or `messages` but not both.
  */
- messages?: Array<CoreMessage>;
+ messages?: Array<CoreMessage> | Array<UIMessage>;
  };
 
+ /**
+ The result of a `generateObject` call.
+ */
+ interface GenerateObjectResult<OBJECT> {
+ /**
+ The generated object (typed according to the schema).
+ */
+ readonly object: OBJECT;
+ /**
+ The reason why the generation finished.
+ */
+ readonly finishReason: FinishReason;
+ /**
+ The token usage of the generated text.
+ */
+ readonly usage: LanguageModelUsage;
+ /**
+ Warnings from the model provider (e.g. unsupported settings).
+ */
+ readonly warnings: CallWarning[] | undefined;
+ /**
+ Additional request information.
+ */
+ readonly request: LanguageModelRequestMetadata;
+ /**
+ Additional response information.
+ */
+ readonly response: LanguageModelResponseMetadata;
+ /**
+ Logprobs for the completion.
+ `undefined` if the mode does not support logprobs or if was not enabled.
+
+ @deprecated Will become a provider extension in the future.
+ */
+ readonly logprobs: LogProbs | undefined;
+ /**
+ Additional provider-specific metadata. They are passed through
+ from the provider to the AI SDK and enable provider-specific
+ results that can be fully encapsulated in the provider.
+ */
+ readonly experimental_providerMetadata: ProviderMetadata | undefined;
+ /**
+ Converts the object to a JSON response.
+ The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
+ */
+ toJsonResponse(init?: ResponseInit): Response;
+ }
+
  /**
  Generate a structured, typed object for a given prompt and schema using a language model.
 
  This function does not stream the output. If you want to stream the output, use `streamObject` instead.
 
- @param model - The language model to use.
-
- @param schema - The schema of the object that the model should generate.
- @param mode - The mode to use for object generation. Not all models support all modes. Defaults to 'auto'.
-
- @param system - A system message that will be part of the prompt.
- @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
- @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
-
- @param maxTokens - Maximum number of tokens to generate.
- @param temperature - Temperature setting.
- The value is passed through to the provider. The range depends on the provider and model.
- It is recommended to set either `temperature` or `topP`, but not both.
- @param topP - Nucleus sampling.
- The value is passed through to the provider. The range depends on the provider and model.
- It is recommended to set either `temperature` or `topP`, but not both.
- @param presencePenalty - Presence penalty setting.
- It affects the likelihood of the model to repeat information that is already in the prompt.
- The value is passed through to the provider. The range depends on the provider and model.
- @param frequencyPenalty - Frequency penalty setting.
- It affects the likelihood of the model to repeatedly use the same words or phrases.
- The value is passed through to the provider. The range depends on the provider and model.
- @param seed - The seed (integer) to use for random sampling.
- If set and supported by the model, calls will generate deterministic results.
-
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
- @param abortSignal - An optional abort signal that can be used to cancel the call.
-
  @returns
  A result object that contains the generated object, the finish reason, the token usage, and additional information.
  */
- declare function generateObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+ declare function generateObject<OBJECT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
+ output?: 'object' | undefined;
  /**
  The language model to use.
- */
+ */
  model: LanguageModel;
  /**
  The schema of the object that the model should generate.
- */
- schema: z.Schema<T>;
+ */
+ schema: z.Schema<OBJECT, z.ZodTypeDef, any> | Schema<OBJECT>;
+ /**
+ Optional name of the output that should be generated.
+ Used by some providers for additional LLM guidance, e.g.
+ via tool or schema name.
+ */
+ schemaName?: string;
+ /**
+ Optional description of the output that should be generated.
+ Used by some providers for additional LLM guidance, e.g.
+ via tool or schema description.
+ */
+ schemaDescription?: string;
  /**
  The mode to use for object generation.
 
- The Zod schema is converted in a JSON schema and used in one of the following ways
+ The schema is converted into a JSON schema and used in one of the following ways
 
  - 'auto': The provider will choose the best mode for the model.
- - 'tool': A tool with the JSON schema as parameters is is provided and the provider is instructed to use it.
- - 'json': The JSON schema and a instruction is injected into the prompt. If the provider supports JSON mode, it is enabled.
- - 'grammar': The provider is instructed to converted the JSON schema into a provider specific grammar and use it to select the output tokens.
+ - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
+ - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
 
  Please note that most providers do not support all modes.
 
  Default and recommended: 'auto' (best mode for the model).
- */
- mode?: 'auto' | 'json' | 'tool' | 'grammar';
- }): Promise<GenerateObjectResult<T>>;
- /**
- The result of a `generateObject` call.
- */
- declare class GenerateObjectResult<T> {
- /**
- The generated object (typed according to the schema).
- */
- readonly object: T;
- /**
- The reason why the generation finished.
- */
- readonly finishReason: FinishReason;
+ */
+ mode?: 'auto' | 'json' | 'tool';
  /**
- The token usage of the generated text.
+ Optional telemetry configuration (experimental).
  */
- readonly usage: TokenUsage;
+ experimental_telemetry?: TelemetrySettings;
  /**
- Warnings from the model provider (e.g. unsupported settings)
- */
- readonly warnings: CallWarning[] | undefined;
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
  /**
- Optional raw response data.
- */
- rawResponse?: {
- /**
- Response headers.
+ * Internal. For test use only. May change without notice.
  */
- headers?: Record<string, string>;
+ _internal?: {
+ generateId?: () => string;
+ currentDate?: () => Date;
  };
- /**
- Logprobs for the completion.
- `undefined` if the mode does not support logprobs or if was not enabled
- */
- readonly logprobs: LogProbs | undefined;
- constructor(options: {
- object: T;
- finishReason: FinishReason;
- usage: TokenUsage;
- warnings: CallWarning[] | undefined;
- rawResponse?: {
- headers?: Record<string, string>;
- };
- logprobs: LogProbs | undefined;
- });
- }
+ }): Promise<GenerateObjectResult<OBJECT>>;
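A usage sketch for the default `'object'` output of `generateObject`; `myModel` is a placeholder for any `LanguageModel`:

```ts
import { generateObject } from 'ai';
import { z } from 'zod';

// The result object is typed according to the Zod schema.
const { object, usage, finishReason } = await generateObject({
  model: myModel, // placeholder language model
  schema: z.object({
    name: z.string(),
    ingredients: z.array(z.string()),
  }),
  schemaName: 'Recipe',
  prompt: 'Generate a simple pancake recipe.',
});

console.log(object.name, object.ingredients.length, usage.totalTokens, finishReason);
```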
  /**
- * @deprecated Use `generateObject` instead.
- */
- declare const experimental_generateObject: typeof generateObject;
-
- type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
+ Generate an array with structured, typed elements for a given prompt and element schema using a language model.
 
- /**
- Create a type from an object with all keys and nested keys set to optional.
- The helper supports normal objects and Zod schemas (which are resolved automatically).
- It always recurses into arrays.
+ This function does not stream the output. If you want to stream the output, use `streamObject` instead.
 
- Adopted from [type-fest](https://github.com/sindresorhus/type-fest/tree/main) PartialDeep.
+ @return
+ A result object that contains the generated object, the finish reason, the token usage, and additional information.
  */
- type DeepPartial<T> = T extends null | undefined | string | number | boolean | symbol | bigint | void | Date | RegExp | ((...arguments_: any[]) => unknown) | (new (...arguments_: any[]) => unknown) ? T : T extends Map<infer KeyType, infer ValueType> ? PartialMap<KeyType, ValueType> : T extends Set<infer ItemType> ? PartialSet<ItemType> : T extends ReadonlyMap<infer KeyType, infer ValueType> ? PartialReadonlyMap<KeyType, ValueType> : T extends ReadonlySet<infer ItemType> ? PartialReadonlySet<ItemType> : T extends z.Schema<any> ? DeepPartial<T['_type']> : T extends object ? T extends ReadonlyArray<infer ItemType> ? ItemType[] extends T ? readonly ItemType[] extends T ? ReadonlyArray<DeepPartial<ItemType | undefined>> : Array<DeepPartial<ItemType | undefined>> : PartialObject<T> : PartialObject<T> : unknown;
- type PartialMap<KeyType, ValueType> = {} & Map<DeepPartial<KeyType>, DeepPartial<ValueType>>;
- type PartialSet<T> = {} & Set<DeepPartial<T>>;
- type PartialReadonlyMap<KeyType, ValueType> = {} & ReadonlyMap<DeepPartial<KeyType>, DeepPartial<ValueType>>;
- type PartialReadonlySet<T> = {} & ReadonlySet<DeepPartial<T>>;
- type PartialObject<ObjectType extends object> = {
- [KeyType in keyof ObjectType]?: DeepPartial<ObjectType[KeyType]>;
- };
-
- /**
- Generate a structured, typed object for a given prompt and schema using a language model.
-
- This function streams the output. If you do not want to stream the output, use `generateObject` instead.
+ declare function generateObject<ELEMENT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
+ output: 'array';
+ /**
+ The language model to use.
+ */
+ model: LanguageModel;
+ /**
+ The element schema of the array that the model should generate.
+ */
+ schema: z.Schema<ELEMENT, z.ZodTypeDef, any> | Schema<ELEMENT>;
+ /**
+ Optional name of the array that should be generated.
+ Used by some providers for additional LLM guidance, e.g.
+ via tool or schema name.
+ */
+ schemaName?: string;
+ /**
+ Optional description of the array that should be generated.
+ Used by some providers for additional LLM guidance, e.g.
+ via tool or schema description.
+ */
+ schemaDescription?: string;
+ /**
+ The mode to use for object generation.
 
- @param model - The language model to use.
+ The schema is converted into a JSON schema and used in one of the following ways
 
- @param schema - The schema of the object that the model should generate.
- @param mode - The mode to use for object generation. Not all models support all modes. Defaults to 'auto'.
+ - 'auto': The provider will choose the best mode for the model.
+ - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
+ - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
 
- @param system - A system message that will be part of the prompt.
- @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
- @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+ Please note that most providers do not support all modes.
 
- @param maxTokens - Maximum number of tokens to generate.
- @param temperature - Temperature setting.
- The value is passed through to the provider. The range depends on the provider and model.
- It is recommended to set either `temperature` or `topP`, but not both.
- @param topP - Nucleus sampling.
- The value is passed through to the provider. The range depends on the provider and model.
- It is recommended to set either `temperature` or `topP`, but not both.
- @param presencePenalty - Presence penalty setting.
- It affects the likelihood of the model to repeat information that is already in the prompt.
- The value is passed through to the provider. The range depends on the provider and model.
- @param frequencyPenalty - Frequency penalty setting.
- It affects the likelihood of the model to repeatedly use the same words or phrases.
- The value is passed through to the provider. The range depends on the provider and model.
- @param seed - The seed (integer) to use for random sampling.
- If set and supported by the model, calls will generate deterministic results.
+ Default and recommended: 'auto' (best mode for the model).
+ */
+ mode?: 'auto' | 'json' | 'tool';
+ /**
+ Optional telemetry configuration (experimental).
+ */
+ experimental_telemetry?: TelemetrySettings;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
+ * Internal. For test use only. May change without notice.
+ */
+ _internal?: {
+ generateId?: () => string;
+ currentDate?: () => Date;
+ };
+ }): Promise<GenerateObjectResult<Array<ELEMENT>>>;
+ /**
+ Generate a value from an enum (limited list of string values) using a language model.
 
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
- @param abortSignal - An optional abort signal that can be used to cancel the call.
+ This function does not stream the output.
 
  @return
- A result object for accessing the partial object stream and additional information.
+ A result object that contains the generated value, the finish reason, the token usage, and additional information.
  */
- declare function streamObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+ declare function generateObject<ENUM extends string>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
+ output: 'enum';
  /**
  The language model to use.
- */
+ */
  model: LanguageModel;
  /**
- The schema of the object that the model should generate.
- */
- schema: z.Schema<T>;
+ The enum values that the model should use.
+ */
+ enum: Array<ENUM>;
  /**
  The mode to use for object generation.
 
- The Zod schema is converted in a JSON schema and used in one of the following ways
+ The schema is converted into a JSON schema and used in one of the following ways
 
  - 'auto': The provider will choose the best mode for the model.
- - 'tool': A tool with the JSON schema as parameters is is provided and the provider is instructed to use it.
- - 'json': The JSON schema and a instruction is injected into the prompt. If the provider supports JSON mode, it is enabled.
- - 'grammar': The provider is instructed to converted the JSON schema into a provider specific grammar and use it to select the output tokens.
+ - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
+ - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
 
  Please note that most providers do not support all modes.
 
  Default and recommended: 'auto' (best mode for the model).
+ */
+ mode?: 'auto' | 'json' | 'tool';
+ /**
+ Optional telemetry configuration (experimental).
+ */
+ experimental_telemetry?: TelemetrySettings;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
+ * Internal. For test use only. May change without notice.
  */
- mode?: 'auto' | 'json' | 'tool' | 'grammar';
- }): Promise<StreamObjectResult<T>>;
- type ObjectStreamPartInput = {
- type: 'error';
- error: unknown;
- } | {
- type: 'finish';
- finishReason: FinishReason;
- logprobs?: LogProbs;
- usage: {
- promptTokens: number;
- completionTokens: number;
- totalTokens: number;
+ _internal?: {
+ generateId?: () => string;
+ currentDate?: () => Date;
  };
- };
- type ObjectStreamPart<T> = ObjectStreamPartInput | {
- type: 'object';
- object: DeepPartial<T>;
- };
+ }): Promise<GenerateObjectResult<ENUM>>;
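A short sketch of the new `'enum'` output overload; `myModel` is again a placeholder:

```ts
// 'enum' output constrains the result to one of the given strings.
const { object: genre } = await generateObject({
  model: myModel, // placeholder language model
  output: 'enum',
  enum: ['action', 'comedy', 'drama', 'horror'],
  prompt: 'Classify the genre of a movie about a haunted spaceship.',
});
// genre is typed as 'action' | 'comedy' | 'drama' | 'horror'
```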
603
997
  /**
604
- The result of a `streamObject` call that contains the partial object stream and additional information.
998
+ Generate JSON with any schema for a given prompt using a language model.
999
+
1000
+ This function does not stream the output. If you want to stream the output, use `streamObject` instead.
1001
+
1002
+ @returns
1003
+ A result object that contains the generated object, the finish reason, the token usage, and additional information.
605
1004
  */
606
- declare class StreamObjectResult<T> {
607
- private readonly originalStream;
1005
+ declare function generateObject(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
1006
+ output: 'no-schema';
608
1007
  /**
609
- Warnings from the model provider (e.g. unsupported settings)
610
- */
611
- readonly warnings: CallWarning[] | undefined;
1008
+ The language model to use.
1009
+ */
1010
+ model: LanguageModel;
612
1011
  /**
613
- Optional raw response data.
1012
+ The mode to use for object generation. Must be "json" for no-schema output.
1013
+ */
1014
+ mode?: 'json';
1015
+ /**
1016
+ Optional telemetry configuration (experimental).
614
1017
  */
615
- rawResponse?: {
616
- /**
617
- Response headers.
1018
+ experimental_telemetry?: TelemetrySettings;
1019
+ /**
1020
+ Additional provider-specific metadata. They are passed through
1021
+ to the provider from the AI SDK and enable provider-specific
1022
+ functionality that can be fully encapsulated in the provider.
1023
+ */
1024
+ experimental_providerMetadata?: ProviderMetadata;
1025
+ /**
1026
+ * Internal. For test use only. May change without notice.
618
1027
  */
619
- headers?: Record<string, string>;
1028
+ _internal?: {
1029
+ generateId?: () => string;
1030
+ currentDate?: () => Date;
620
1031
  };
621
- constructor({ stream, warnings, rawResponse, }: {
622
- stream: ReadableStream<string | ObjectStreamPartInput>;
623
- warnings: CallWarning[] | undefined;
624
- rawResponse?: {
625
- headers?: Record<string, string>;
626
- };
627
- });
628
- get partialObjectStream(): AsyncIterableStream<DeepPartial<T>>;
629
- get fullStream(): AsyncIterableStream<ObjectStreamPart<T>>;
630
- }
631
- /**
632
- * @deprecated Use `streamObject` instead.
633
- */
634
- declare const experimental_streamObject: typeof streamObject;
1032
+ }): Promise<GenerateObjectResult<JSONValue>>;
635
1033
 
636
- /**
637
- A tool contains the description and the schema of the input that the tool expects.
638
- This enables the language model to generate the input.
1034
+ type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
639
1035
 
640
- The tool can also contain an optional execute function for the actual execution function of the tool.
1036
+ /**
1037
+ The result of a `streamObject` call that contains the partial object stream and additional information.
641
1038
  */
642
- interface CoreTool<PARAMETERS extends z.ZodTypeAny = any, RESULT = any> {
1039
+ interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
643
1040
  /**
644
- An optional description of what the tool does. Will be used by the language model to decide whether to use the tool.
645
- */
646
- description?: string;
1041
+ Warnings from the model provider (e.g. unsupported settings)
1042
+ */
1043
+ readonly warnings: Promise<CallWarning[] | undefined>;
647
1044
  /**
648
- The schema of the input that the tool expects. The language model will use this to generate the input.
649
- It is also used to validate the output of the language model.
650
- Use descriptions to make the input understandable for the language model.
1045
+ The token usage of the generated response. Resolved when the response is finished.
1046
+ */
1047
+ readonly usage: Promise<LanguageModelUsage>;
1048
+ /**
1049
+ Additional provider-specific metadata. They are passed through
1050
+ from the provider to the AI SDK and enable provider-specific
1051
+ results that can be fully encapsulated in the provider.
651
1052
  */
652
- parameters: PARAMETERS;
1053
+ readonly experimental_providerMetadata: Promise<ProviderMetadata | undefined>;
653
1054
  /**
654
- An async function that is called with the arguments from the tool call and produces a result.
655
- If not provided, the tool will not be executed automatically.
1055
+ Additional request information from the last step.
1056
+ */
1057
+ readonly request: Promise<LanguageModelRequestMetadata>;
1058
+ /**
1059
+ Additional response information.
1060
+ */
1061
+ readonly response: Promise<LanguageModelResponseMetadata>;
1062
+ /**
1063
+ The generated object (typed according to the schema). Resolved when the response is finished.
1064
+ */
1065
+ readonly object: Promise<RESULT>;
1066
+ /**
1067
+ Stream of partial objects. It gets more complete as the stream progresses.
1068
+
1069
+ Note that the partial object is not validated.
1070
+ If you want to be certain that the actual content matches your schema, you need to implement your own validation for partial results.
1071
+ */
1072
+ readonly partialObjectStream: AsyncIterableStream<PARTIAL>;
1073
+ /**
1074
+ * Stream over complete array elements. Only available if the output strategy is set to `array`.
656
1075
  */
657
- execute?: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
1076
+ readonly elementStream: ELEMENT_STREAM;
1077
+ /**
1078
+ Text stream of the JSON representation of the generated object. It contains text chunks.
1079
+ When the stream is finished, the object is valid JSON that can be parsed.
1080
+ */
1081
+ readonly textStream: AsyncIterableStream<string>;
1082
+ /**
1083
+ Stream of different types of events, including partial objects, errors, and finish events.
1084
+ Only errors that stop the stream, such as network errors, are thrown.
1085
+ */
1086
+ readonly fullStream: AsyncIterableStream<ObjectStreamPart<PARTIAL>>;
1087
+ /**
1088
+ Writes text delta output to a Node.js response-like object.
1089
+ It sets a `Content-Type` header to `text/plain; charset=utf-8` and
1090
+ writes each text delta as a separate chunk.
1091
+
1092
+ @param response A Node.js response-like object (ServerResponse).
1093
+ @param init Optional headers, status code, and status text.
1094
+ */
1095
+ pipeTextStreamToResponse(response: ServerResponse$1, init?: ResponseInit): void;
1096
+ /**
1097
+ Creates a simple text stream response.
1098
+ The response has a `Content-Type` header set to `text/plain; charset=utf-8`.
1099
+ Each text delta is encoded as UTF-8 and sent as a separate chunk.
1100
+ Non-text-delta events are ignored.
1101
+
1102
+ @param init Optional headers, status code, and status text.
1103
+ */
1104
+ toTextStreamResponse(init?: ResponseInit): Response;
658
1105
  }
659
- /**
- Helper function for inferring the execute args of a tool.
- */
- declare function tool<PARAMETERS extends z.ZodTypeAny, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
- execute: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
- }): CoreTool<PARAMETERS, RESULT> & {
- execute: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
- };
- declare function tool<PARAMETERS extends z.ZodTypeAny, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
- execute?: undefined;
- }): CoreTool<PARAMETERS, RESULT> & {
- execute: undefined;
+ type ObjectStreamPart<PARTIAL> = {
+ type: 'object';
+ object: PARTIAL;
+ } | {
+ type: 'text-delta';
+ textDelta: string;
+ } | {
+ type: 'error';
+ error: unknown;
+ } | {
+ type: 'finish';
+ finishReason: FinishReason;
+ logprobs?: LogProbs;
+ usage: LanguageModelUsage;
+ response: LanguageModelResponseMetadata;
+ providerMetadata?: ProviderMetadata;
 };
+
+ type OnFinishCallback<RESULT> = (event: {
+ /**
+ The token usage of the generated response.
+ */
+ usage: LanguageModelUsage;
+ /**
+ The generated object. Can be undefined if the final object does not match the schema.
+ */
+ object: RESULT | undefined;
+ /**
+ Optional error object. This is e.g. a TypeValidationError when the final object does not match the schema.
+ */
+ error: unknown | undefined;
+ /**
+ Response metadata.
+ */
+ response: LanguageModelResponseMetadata;
+ /**
+ Warnings from the model provider (e.g. unsupported settings).
+ */
+ warnings?: CallWarning[];
+ /**
+ Additional provider-specific metadata. They are passed through
+ from the provider to the AI SDK and enable provider-specific
+ results that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata: ProviderMetadata | undefined;
+ }) => Promise<void> | void;
+ /**
+ Generate a structured, typed object for a given prompt and schema using a language model.
+
+ This function streams the output. If you do not want to stream the output, use `generateObject` instead.
+
+ @return
+ A result object for accessing the partial object stream and additional information.
+ */
+ declare function streamObject<OBJECT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
+ output?: 'object' | undefined;
+ /**
+ The language model to use.
+ */
+ model: LanguageModel;
+ /**
+ The schema of the object that the model should generate.
+ */
+ schema: z.Schema<OBJECT, z.ZodTypeDef, any> | Schema<OBJECT>;
+ /**
+ Optional name of the output that should be generated.
+ Used by some providers for additional LLM guidance, e.g.
+ via tool or schema name.
+ */
+ schemaName?: string;
+ /**
+ Optional description of the output that should be generated.
+ Used by some providers for additional LLM guidance, e.g.
+ via tool or schema description.
+ */
+ schemaDescription?: string;
+ /**
+ The mode to use for object generation.
+
+ The schema is converted into a JSON schema and used in one of the following ways:
+
+ - 'auto': The provider will choose the best mode for the model.
+ - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
+ - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
+
+ Please note that most providers do not support all modes.
+
+ Default and recommended: 'auto' (best mode for the model).
+ */
+ mode?: 'auto' | 'json' | 'tool';
+ /**
+ Optional telemetry configuration (experimental).
+ */
+ experimental_telemetry?: TelemetrySettings;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
+ Callback that is called when the LLM response and the final object validation are finished.
+ */
+ onFinish?: OnFinishCallback<OBJECT>;
+ /**
+ * Internal. For test use only. May change without notice.
+ */
+ _internal?: {
+ generateId?: () => string;
+ currentDate?: () => Date;
+ now?: () => number;
+ };
+ }): StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>;
+ /**
+ Generate an array with structured, typed elements for a given prompt and element schema using a language model.
+
+ This function streams the output. If you do not want to stream the output, use `generateObject` instead.
+
+ @return
+ A result object for accessing the partial object stream and additional information.
+ */
+ declare function streamObject<ELEMENT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
+ output: 'array';
+ /**
+ The language model to use.
+ */
+ model: LanguageModel;
+ /**
+ The element schema of the array that the model should generate.
+ */
+ schema: z.Schema<ELEMENT, z.ZodTypeDef, any> | Schema<ELEMENT>;
+ /**
+ Optional name of the array that should be generated.
+ Used by some providers for additional LLM guidance, e.g.
+ via tool or schema name.
+ */
+ schemaName?: string;
+ /**
+ Optional description of the array that should be generated.
+ Used by some providers for additional LLM guidance, e.g.
+ via tool or schema description.
+ */
+ schemaDescription?: string;
+ /**
+ The mode to use for object generation.
+
+ The schema is converted into a JSON schema and used in one of the following ways:
+
+ - 'auto': The provider will choose the best mode for the model.
+ - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
+ - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
+
+ Please note that most providers do not support all modes.
+
+ Default and recommended: 'auto' (best mode for the model).
+ */
+ mode?: 'auto' | 'json' | 'tool';
+ /**
+ Optional telemetry configuration (experimental).
+ */
+ experimental_telemetry?: TelemetrySettings;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
+ Callback that is called when the LLM response and the final object validation are finished.
+ */
+ onFinish?: OnFinishCallback<Array<ELEMENT>>;
+ /**
+ * Internal. For test use only. May change without notice.
+ */
+ _internal?: {
+ generateId?: () => string;
+ currentDate?: () => Date;
+ now?: () => number;
+ };
+ }): StreamObjectResult<Array<ELEMENT>, Array<ELEMENT>, AsyncIterableStream<ELEMENT>>;
+ /**
+ Generate JSON with any schema for a given prompt using a language model.
+
+ This function streams the output. If you do not want to stream the output, use `generateObject` instead.
+
+ @return
+ A result object for accessing the partial object stream and additional information.
+ */
+ declare function streamObject(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
+ output: 'no-schema';
+ /**
+ The language model to use.
+ */
+ model: LanguageModel;
+ /**
+ The mode to use for object generation. Must be "json" for no-schema output.
+ */
+ mode?: 'json';
+ /**
+ Optional telemetry configuration (experimental).
+ */
+ experimental_telemetry?: TelemetrySettings;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
+ Callback that is called when the LLM response and the final object validation are finished.
+ */
+ onFinish?: OnFinishCallback<JSONValue>;
+ /**
+ * Internal. For test use only. May change without notice.
+ */
+ _internal?: {
+ generateId?: () => string;
+ currentDate?: () => Date;
+ now?: () => number;
+ };
+ }): StreamObjectResult<JSONValue, JSONValue, never>;
+
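A sketch of the `output: 'array'` overload added above, combining `elementStream` with the `onFinish` callback. Schema, model, and prompt are illustrative assumptions.

```ts
import { streamObject } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumption: illustrative provider
import { z } from 'zod';

const result = streamObject({
  output: 'array',
  model: openai('gpt-4o-mini'),
  schema: z.object({ name: z.string(), rating: z.number() }), // element schema
  prompt: 'Generate three fictional restaurant reviews.',
  onFinish({ object, error }) {
    // `object` is undefined when final validation fails; `error` explains why.
    if (object === undefined) console.error(error);
  },
});

// elementStream yields each completed, typed array element as it finishes.
for await (const element of result.elementStream) {
  console.log(element.name, element.rating);
}
```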
+ type Parameters = z.ZodTypeAny | Schema<any>;
+ type inferParameters<PARAMETERS extends Parameters> = PARAMETERS extends Schema<any> ? PARAMETERS['_type'] : PARAMETERS extends z.ZodTypeAny ? z.infer<PARAMETERS> : never;
+ interface ToolExecutionOptions {
+ /**
+ * The ID of the tool call. You can use it e.g. when sending tool-call related information with stream data.
+ */
+ toolCallId: string;
+ /**
+ * Messages that were sent to the language model to initiate the response that contained the tool call.
+ * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
+ */
+ messages: CoreMessage[];
+ /**
+ * An optional abort signal that indicates that the overall operation should be aborted.
+ */
+ abortSignal?: AbortSignal;
+ }
+ /**
+ A tool contains the description and the schema of the input that the tool expects.
+ This enables the language model to generate the input.
+
+ The tool can also contain an optional execute function for the actual execution of the tool.
+ */
+ type CoreTool<PARAMETERS extends Parameters = any, RESULT = any> = {
+ /**
+ The schema of the input that the tool expects. The language model will use this to generate the input.
+ It is also used to validate the output of the language model.
+ Use descriptions to make the input understandable for the language model.
+ */
+ parameters: PARAMETERS;
+ /**
+ Optional conversion function that maps the tool result to multi-part tool content for LLMs.
+ */
+ experimental_toToolResultContent?: (result: RESULT) => ToolResultContent;
+ /**
+ An async function that is called with the arguments from the tool call and produces a result.
+ If not provided, the tool will not be executed automatically.
+
+ @args is the input of the tool call.
+ @options.abortSignal is a signal that can be used to abort the tool call.
+ */
+ execute?: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
+ } & ({
+ /**
+ Function tool.
+ */
+ type?: undefined | 'function';
+ /**
+ An optional description of what the tool does. Will be used by the language model to decide whether to use the tool.
+ */
+ description?: string;
+ } | {
+ /**
+ Provider-defined tool.
+ */
+ type: 'provider-defined';
+ /**
+ The ID of the tool. Should follow the format `<provider-name>.<tool-name>`.
+ */
+ id: `${string}.${string}`;
+ /**
+ The arguments for configuring the tool. Must match the expected arguments defined by the provider for this tool.
+ */
+ args: Record<string, unknown>;
+ });
+ /**
+ Helper function for inferring the execute args of a tool.
+ */
+ declare function tool<PARAMETERS extends Parameters, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
+ execute: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
+ }): CoreTool<PARAMETERS, RESULT> & {
+ execute: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
+ };
+ declare function tool<PARAMETERS extends Parameters, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
+ execute?: undefined;
+ }): CoreTool<PARAMETERS, RESULT> & {
+ execute: undefined;
+ };
+
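A sketch of defining a typed tool with the `tool` helper and the new `ToolExecutionOptions` second argument declared above. The weather lookup and its endpoint are hypothetical stand-ins.

```ts
import { tool } from 'ai';
import { z } from 'zod';

export const weatherTool = tool({
  description: 'Get the current temperature for a city.',
  parameters: z.object({ city: z.string().describe('City name') }),
  // `args` is inferred as { city: string } via inferParameters;
  // `abortSignal` comes from ToolExecutionOptions and cancels the fetch.
  execute: async ({ city }, { abortSignal }) => {
    const res = await fetch(
      `https://example.com/weather?city=${encodeURIComponent(city)}`, // hypothetical endpoint
      { signal: abortSignal },
    );
    return (await res.json()) as { temperatureC: number };
  },
});
```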
 /**
- * @deprecated Use `CoreTool` instead.
+ Converts an array of messages from useChat into an array of CoreMessages that can be used
+ with the AI core functions (e.g. `streamText`).
 */
- type ExperimentalTool = CoreTool;
+ declare function convertToCoreMessages<TOOLS extends Record<string, CoreTool> = never>(messages: Array<UIMessage>, options?: {
+ tools?: TOOLS;
+ }): CoreMessage[];

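A sketch of the new `convertToCoreMessages` signature in a route handler. The `streamText` call shape is an assumption consistent with the result type declared later in this file.

```ts
import { convertToCoreMessages, streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumption: illustrative provider

export async function POST(req: Request) {
  const { messages } = await req.json(); // UIMessage[] posted by useChat
  const result = streamText({
    model: openai('gpt-4o-mini'),
    messages: convertToCoreMessages(messages),
  });
  return result.toDataStreamResponse();
}
```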
 /**
 Create a union of the given object's values, and optionally specify which keys to get the values from.
@@ -716,56 +1454,16 @@ onlyBar('bar');
 */
 type ValueOf<ObjectType, ValueType extends keyof ObjectType = keyof ObjectType> = ObjectType[ValueType];

- /**
- Typed tool call that is returned by generateText and streamText.
- It contains the tool call ID, the tool name, and the tool arguments.
- */
- interface ToolCall$1<NAME extends string, ARGS> {
- /**
- ID of the tool call. This ID is used to match the tool call with the tool result.
- */
- toolCallId: string;
- /**
- Name of the tool that is being called.
- */
- toolName: NAME;
- /**
- Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
- */
- args: ARGS;
- }
- type ToToolCall<TOOLS extends Record<string, CoreTool>> = ValueOf<{
+ type ToolCallUnion<TOOLS extends Record<string, CoreTool>> = ValueOf<{
 [NAME in keyof TOOLS]: {
 type: 'tool-call';
 toolCallId: string;
 toolName: NAME & string;
- args: z.infer<TOOLS[NAME]['parameters']>;
+ args: inferParameters<TOOLS[NAME]['parameters']>;
 };
 }>;
- type ToToolCallArray<TOOLS extends Record<string, CoreTool>> = Array<ToToolCall<TOOLS>>;
+ type ToolCallArray<TOOLS extends Record<string, CoreTool>> = Array<ToolCallUnion<TOOLS>>;

- /**
- Typed tool result that is returned by generateText and streamText.
- It contains the tool call ID, the tool name, the tool arguments, and the tool result.
- */
- interface ToolResult<NAME extends string, ARGS, RESULT> {
- /**
- ID of the tool call. This ID is used to match the tool call with the tool result.
- */
- toolCallId: string;
- /**
- Name of the tool that was called.
- */
- toolName: NAME;
- /**
- Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
- */
- args: ARGS;
- /**
- Result of the tool call. This is the result of the tool's execution.
- */
- result: RESULT;
- }
 type ToToolsWithExecute<TOOLS extends Record<string, CoreTool>> = {
 [K in keyof TOOLS as TOOLS[K] extends {
 execute: any;
@@ -779,125 +1477,247 @@ type ToToolResultObject<TOOLS extends Record<string, CoreTool>> = ValueOf<{
 type: 'tool-result';
 toolCallId: string;
 toolName: NAME & string;
- args: z.infer<TOOLS[NAME]['parameters']>;
+ args: inferParameters<TOOLS[NAME]['parameters']>;
 result: Awaited<ReturnType<Exclude<TOOLS[NAME]['execute'], undefined>>>;
 };
 }>;
- type ToToolResult<TOOLS extends Record<string, CoreTool>> = ToToolResultObject<ToToolsWithDefinedExecute<ToToolsWithExecute<TOOLS>>>;
- type ToToolResultArray<TOOLS extends Record<string, CoreTool>> = Array<ToToolResult<TOOLS>>;
-
- /**
- Generate a text and call tools for a given prompt using a language model.
-
- This function does not stream the output. If you want to stream the output, use `streamText` instead.
+ type ToolResultUnion<TOOLS extends Record<string, CoreTool>> = ToToolResultObject<ToToolsWithDefinedExecute<ToToolsWithExecute<TOOLS>>>;
+ type ToolResultArray<TOOLS extends Record<string, CoreTool>> = Array<ToolResultUnion<TOOLS>>;

- @param model - The language model to use.
- @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
-
- @param system - A system message that will be part of the prompt.
- @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
- @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
-
- @param maxTokens - Maximum number of tokens to generate.
- @param temperature - Temperature setting.
- The value is passed through to the provider. The range depends on the provider and model.
- It is recommended to set either `temperature` or `topP`, but not both.
- @param topP - Nucleus sampling.
- The value is passed through to the provider. The range depends on the provider and model.
- It is recommended to set either `temperature` or `topP`, but not both.
- @param presencePenalty - Presence penalty setting.
- It affects the likelihood of the model to repeat information that is already in the prompt.
- The value is passed through to the provider. The range depends on the provider and model.
- @param frequencyPenalty - Frequency penalty setting.
- It affects the likelihood of the model to repeatedly use the same words or phrases.
- The value is passed through to the provider. The range depends on the provider and model.
- @param seed - The seed (integer) to use for random sampling.
- If set and supported by the model, calls will generate deterministic results.
-
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
- @param abortSignal - An optional abort signal that can be used to cancel the call.
-
- @returns
- A result object that contains the generated text, the results of the tool calls, and additional information.
- */
- declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
- /**
- The language model to use.
- */
- model: LanguageModel;
- /**
- The tools that the model can call. The model needs to support calling tools.
- */
- tools?: TOOLS;
- }): Promise<GenerateTextResult<TOOLS>>;
 /**
- The result of a `generateText` call.
- It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
+ * The result of a single step in the generation process.
 */
- declare class GenerateTextResult<TOOLS extends Record<string, CoreTool>> {
+ type StepResult<TOOLS extends Record<string, CoreTool>> = {
 /**
 The generated text.
- */
+ */
 readonly text: string;
 /**
 The tool calls that were made during the generation.
- */
- readonly toolCalls: ToToolCallArray<TOOLS>;
+ */
+ readonly toolCalls: ToolCallArray<TOOLS>;
 /**
 The results of the tool calls.
- */
- readonly toolResults: ToToolResultArray<TOOLS>;
+ */
+ readonly toolResults: ToolResultArray<TOOLS>;
 /**
 The reason why the generation finished.
- */
+ */
 readonly finishReason: FinishReason;
 /**
 The token usage of the generated text.
- */
- readonly usage: TokenUsage;
+ */
+ readonly usage: LanguageModelUsage;
 /**
- Warnings from the model provider (e.g. unsupported settings)
- */
+ Warnings from the model provider (e.g. unsupported settings).
+ */
 readonly warnings: CallWarning[] | undefined;
 /**
- Optional raw response data.
+ Logprobs for the completion.
+ `undefined` if the mode does not support logprobs or if it was not enabled.
+ */
+ readonly logprobs: LogProbs | undefined;
+ /**
+ Additional request information.
 */
- rawResponse?: {
+ readonly request: LanguageModelRequestMetadata;
+ /**
+ Additional response information.
+ */
+ readonly response: LanguageModelResponseMetadata & {
 /**
- Response headers.
- */
- headers?: Record<string, string>;
+ The response messages that were generated during the call. It consists of an assistant message,
+ potentially containing tool calls.
+ */
+ readonly messages: Array<CoreAssistantMessage | CoreToolMessage>;
 };
 /**
- Logprobs for the completion.
- `undefined` if the mode does not support logprobs or if was not enabled
+ Additional provider-specific metadata. They are passed through
+ from the provider to the AI SDK and enable provider-specific
+ results that can be fully encapsulated in the provider.
 */
- readonly logprobs: LogProbs | undefined;
- constructor(options: {
- text: string;
- toolCalls: ToToolCallArray<TOOLS>;
- toolResults: ToToolResultArray<TOOLS>;
- finishReason: FinishReason;
- usage: TokenUsage;
- warnings: CallWarning[] | undefined;
- rawResponse?: {
- headers?: Record<string, string>;
- };
- logprobs: LogProbs | undefined;
- });
- }
+ readonly experimental_providerMetadata: ProviderMetadata | undefined;
+ /**
+ The type of step that this result is for. The first step is always
+ an "initial" step, and subsequent steps are either "continue" steps
+ or "tool-result" steps.
+ */
+ readonly stepType: 'initial' | 'continue' | 'tool-result';
+ /**
+ True when there will be a continuation step with a continuation text.
+ */
+ readonly isContinued: boolean;
+ };
+
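A sketch of inspecting the new `StepResult` shape, e.g. inside an `onStepFinish` callback (see the `generateText` declaration later in this file). It assumes `StepResult` and `CoreTool` are exported from the package root.

```ts
import type { CoreTool, StepResult } from 'ai'; // assumption: types are exported

function logStep<TOOLS extends Record<string, CoreTool>>(step: StepResult<TOOLS>) {
  // stepType distinguishes the initial call from continue/tool-result steps.
  console.log(`[${step.stepType}] finish=${step.finishReason}`, {
    tokens: step.usage.totalTokens,
    toolCalls: step.toolCalls.map((call) => call.toolName),
    continued: step.isContinued,
  });
}
```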
 /**
- * @deprecated Use `generateText` instead.
+ The result of a `generateText` call.
+ It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
 */
- declare const experimental_generateText: typeof generateText;
+ interface GenerateTextResult<TOOLS extends Record<string, CoreTool>, OUTPUT> {
+ /**
+ The generated text.
+ */
+ readonly text: string;
+ /**
+ The generated structured output. It uses the `experimental_output` specification.
+ */
+ readonly experimental_output: OUTPUT;
+ /**
+ The tool calls that were made during the generation.
+ */
+ readonly toolCalls: ToolCallArray<TOOLS>;
+ /**
+ The results of the tool calls.
+ */
+ readonly toolResults: ToolResultArray<TOOLS>;
+ /**
+ The reason why the generation finished.
+ */
+ readonly finishReason: FinishReason;
+ /**
+ The token usage of the generated text.
+ */
+ readonly usage: LanguageModelUsage;
+ /**
+ Warnings from the model provider (e.g. unsupported settings).
+ */
+ readonly warnings: CallWarning[] | undefined;
+ /**
+ Details for all steps.
+ You can use this to get information about intermediate steps,
+ such as the tool calls or the response headers.
+ */
+ readonly steps: Array<StepResult<TOOLS>>;
+ /**
+ Additional request information.
+ */
+ readonly request: LanguageModelRequestMetadata;
+ /**
+ Additional response information.
+ */
+ readonly response: LanguageModelResponseMetadata & {
+ /**
+ The response messages that were generated during the call. It consists of an assistant message,
+ potentially containing tool calls.
+
+ When there are tool results, there is an additional tool message with the tool results that are available.
+ If there are tools that do not have execute functions, they are not included in the tool results and
+ need to be added separately.
+ */
+ messages: Array<CoreAssistantMessage | CoreToolMessage>;
+ };
+ /**
+ Logprobs for the completion.
+ `undefined` if the mode does not support logprobs or if it was not enabled.
+
+ @deprecated Will become a provider extension in the future.
+ */
+ readonly logprobs: LogProbs | undefined;
+ /**
+ Additional provider-specific metadata. They are passed through
+ from the provider to the AI SDK and enable provider-specific
+ results that can be fully encapsulated in the provider.
+ */
+ readonly experimental_providerMetadata: ProviderMetadata | undefined;
+ }
+
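A sketch of carrying `response.messages` into the next turn, as the doc comment above suggests. Model choice and prompt are illustrative assumptions.

```ts
import { generateText } from 'ai';
import type { CoreMessage } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumption: illustrative provider

const history: CoreMessage[] = [{ role: 'user', content: 'What is 2 + 2?' }];

const result = await generateText({
  model: openai('gpt-4o-mini'),
  messages: history,
});

// Append the assistant (and any tool) messages so the next call sees this turn.
history.push(...result.response.messages);
console.log(result.text, result.usage.totalTokens);
```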
+ interface Output<OUTPUT, PARTIAL> {
+ readonly type: 'object' | 'text';
+ injectIntoSystemPrompt(options: {
+ system: string | undefined;
+ model: LanguageModel;
+ }): string | undefined;
+ responseFormat: (options: {
+ model: LanguageModel;
+ }) => LanguageModelV1CallOptions['responseFormat'];
+ parsePartial(options: {
+ text: string;
+ }): {
+ partial: PARTIAL;
+ } | undefined;
+ parseOutput(options: {
+ text: string;
+ }, context: {
+ response: LanguageModelResponseMetadata;
+ usage: LanguageModelUsage;
+ }): OUTPUT;
+ }
+ declare const text: () => Output<string, string>;
+ declare const object: <OUTPUT>({ schema: inputSchema, }: {
+ schema: z.Schema<OUTPUT, z.ZodTypeDef, any> | Schema<OUTPUT>;
+ }) => Output<OUTPUT, DeepPartial<OUTPUT>>;
+
+ type output_Output<OUTPUT, PARTIAL> = Output<OUTPUT, PARTIAL>;
+ declare const output_object: typeof object;
+ declare const output_text: typeof text;
+ declare namespace output {
+ export {
+ output_Output as Output,
+ output_object as object,
+ output_text as text,
+ };
+ }
+
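A sketch of using the `output` namespace above through `generateText`'s `experimental_output` option (declared below). The `Output` import name is an assumption based on the namespace exports.

```ts
import { generateText, Output } from 'ai'; // assumption: namespace exported as `Output`
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

const { experimental_output } = await generateText({
  model: openai('gpt-4o-mini'),
  experimental_output: Output.object({
    schema: z.object({ sentiment: z.enum(['positive', 'negative']) }),
  }),
  prompt: 'Classify: "This diff viewer is great!"',
});

// Typed according to the schema passed to Output.object.
console.log(experimental_output.sentiment);
```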
+ declare const symbol$c: unique symbol;
+ declare class InvalidToolArgumentsError extends AISDKError {
+ private readonly [symbol$c];
+ readonly toolName: string;
+ readonly toolArgs: string;
+ constructor({ toolArgs, toolName, cause, message, }: {
+ message?: string;
+ toolArgs: string;
+ toolName: string;
+ cause: unknown;
+ });
+ static isInstance(error: unknown): error is InvalidToolArgumentsError;
+ }
+
+ declare const symbol$b: unique symbol;
+ declare class NoSuchToolError extends AISDKError {
+ private readonly [symbol$b];
+ readonly toolName: string;
+ readonly availableTools: string[] | undefined;
+ constructor({ toolName, availableTools, message, }: {
+ toolName: string;
+ availableTools?: string[] | undefined;
+ message?: string;
+ });
+ static isInstance(error: unknown): error is NoSuchToolError;
+ }
+
+ /**
+ * A function that attempts to repair a tool call that failed to parse.
+ *
+ * It receives the error and the context as arguments and returns the repaired
+ * tool call JSON as text.
+ *
+ * @param options.system - The system prompt.
+ * @param options.messages - The messages in the current generation step.
+ * @param options.toolCall - The tool call that failed to parse.
+ * @param options.tools - The tools that are available.
+ * @param options.parameterSchema - A function that returns the JSON Schema for a tool.
+ * @param options.error - The error that occurred while parsing the tool call.
+ */
+ type ToolCallRepairFunction<TOOLS extends Record<string, CoreTool>> = (options: {
+ system: string | undefined;
+ messages: CoreMessage[];
+ toolCall: LanguageModelV1FunctionToolCall;
+ tools: TOOLS;
+ parameterSchema: (options: {
+ toolName: string;
+ }) => JSONSchema7;
+ error: NoSuchToolError | InvalidToolArgumentsError;
+ }) => Promise<LanguageModelV1FunctionToolCall | null>;

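A conservative sketch of a `ToolCallRepairFunction`: it gives up on unknown tool names and otherwise re-emits the call with lightly cleaned argument text. A real implementation might instead re-ask the model with the JSON Schema from `parameterSchema`. The exported type name is an assumption.

```ts
import { NoSuchToolError } from 'ai';
import type { ToolCallRepairFunction } from 'ai'; // assumption: type is exported

const repairToolCall: ToolCallRepairFunction<any> = async ({ toolCall, error }) => {
  if (NoSuchToolError.isInstance(error)) {
    return null; // cannot repair a call to a tool that does not exist
  }
  // Example repair: strip a trailing markdown fence the model may have emitted
  // around the JSON arguments (args is the raw argument text).
  const args = toolCall.args.replace(/```\s*$/, '');
  return { ...toolCall, args };
};
```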
 /**
 Generate a text and call tools for a given prompt using a language model.

- This function streams the output. If you do not want to stream the output, use `generateText` instead.
+ This function does not stream the output. If you want to stream the output, use `streamText` instead.

 @param model - The language model to use.
+
 @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
+ @param toolChoice - The tool choice strategy. Default: 'auto'.

 @param system - A system message that will be part of the prompt.
 @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
@@ -910,1072 +1730,759 @@ It is recommended to set either `temperature` or `topP`, but not both.
 @param topP - Nucleus sampling.
 The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
+ @param topK - Only sample from the top K options for each subsequent token.
+ Used to remove "long tail" low probability responses.
+ Recommended for advanced use cases only. You usually only need to use temperature.
 @param presencePenalty - Presence penalty setting.
 It affects the likelihood of the model to repeat information that is already in the prompt.
 The value is passed through to the provider. The range depends on the provider and model.
 @param frequencyPenalty - Frequency penalty setting.
 It affects the likelihood of the model to repeatedly use the same words or phrases.
 The value is passed through to the provider. The range depends on the provider and model.
+ @param stopSequences - Stop sequences.
+ If set, the model will stop generating text when one of the stop sequences is generated.
 @param seed - The seed (integer) to use for random sampling.
 If set and supported by the model, calls will generate deterministic results.

 @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
 @param abortSignal - An optional abort signal that can be used to cancel the call.
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.

- @return
- A result object for accessing different stream types and additional information.
+ @param maxSteps - Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
+
+ @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
+
+ @returns
+ A result object that contains the generated text, the results of the tool calls, and additional information.
 */
- declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+ declare function generateText<TOOLS extends Record<string, CoreTool>, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, maxSteps, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
 /**
 The language model to use.
 */
 model: LanguageModel;
 /**
 The tools that the model can call. The model needs to support calling tools.
- */
+ */
 tools?: TOOLS;
- }): Promise<StreamTextResult<TOOLS>>;
- declare function streamResponse<TOOLS extends Record<string, CoreTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
 /**
- The language model to use.
+ The tool choice strategy. Default: 'auto'.
 */
- model: LanguageModel;
+ toolChoice?: CoreToolChoice<TOOLS>;
 /**
- The tools that the model can call. The model needs to support calling tools.
- */
- tools?: TOOLS;
- }): Promise<{
- stream: ReadableStream;
- warnings: LanguageModelV1CallWarning[] | undefined;
- rawResponse: {
- headers?: Record<string, string>;
- } | undefined;
- }>;
- type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
- type: 'text-delta';
- textDelta: string;
- } | ({
- type: 'tool-call';
- } & ToToolCall<TOOLS>) | {
- type: 'error';
- error: unknown;
- } | ({
- type: 'tool-result';
- } & ToToolResult<TOOLS>) | {
- type: 'finish';
- finishReason: FinishReason;
- logprobs?: LogProbs;
- usage: {
- promptTokens: number;
- completionTokens: number;
- totalTokens: number;
- };
- };
- /**
- A result object for accessing different stream types and additional information.
- */
- declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
- private originalStream;
- /**
- Warnings from the model provider (e.g. unsupported settings).
- */
- readonly warnings: CallWarning[] | undefined;
- /**
- The token usage of the generated text. Resolved when the response is finished.
- */
- readonly usage: Promise<TokenUsage>;
- /**
- The reason why the generation finished. Resolved when the response is finished.
- */
- readonly finishReason: Promise<FinishReason>;
- /**
- Optional raw response data.
- */
- rawResponse?: {
- /**
- Response headers.
- */
- headers?: Record<string, string>;
- };
- constructor({ stream, warnings, rawResponse, }: {
- stream: ReadableStream<TextStreamPart<TOOLS>>;
- warnings: CallWarning[] | undefined;
- rawResponse?: {
- headers?: Record<string, string>;
- };
- });
- /**
- Split out a new stream from the original stream.
- The original stream is replaced to allow for further splitting,
- since we do not know how many times the stream will be split.
-
- Note: this leads to buffering the stream content on the server.
- However, the LLM results are expected to be small enough to not cause issues.
- */
- private teeStream;
- /**
- A text stream that returns only the generated text deltas. You can use it
- as either an AsyncIterable or a ReadableStream. When an error occurs, the
- stream will throw the error.
- */
- get textStream(): AsyncIterableStream<string>;
- /**
- A stream with all events, including text deltas, tool calls, tool results, and
- errors.
- You can use it as either an AsyncIterable or a ReadableStream. When an error occurs, the
- stream will throw the error.
- */
- get fullStream(): AsyncIterableStream<TextStreamPart<TOOLS>>;
- /**
- Converts the result to an `AIStream` object that is compatible with `StreamingTextResponse`.
- It can be used with the `useChat` and `useCompletion` hooks.
-
- @param callbacks
- Stream callbacks that will be called when the stream emits events.
-
- @returns an `AIStream` object.
- */
- toAIStream(callbacks?: AIStreamCallbacksAndOptions): ReadableStream<Uint8Array>;
- /**
- Writes stream data output to a Node.js response-like object.
- It sets a `Content-Type` header to `text/plain; charset=utf-8` and
- writes each stream data part as a separate chunk.
-
- @param response A Node.js response-like object (ServerResponse).
- @param init Optional headers and status code.
- */
- pipeAIStreamToResponse(response: ServerResponse, init?: {
- headers?: Record<string, string>;
- status?: number;
- }): void;
- /**
- Writes text delta output to a Node.js response-like object.
- It sets a `Content-Type` header to `text/plain; charset=utf-8` and
- writes each text delta as a separate chunk.
-
- @param response A Node.js response-like object (ServerResponse).
- @param init Optional headers and status code.
- */
- pipeTextStreamToResponse(response: ServerResponse, init?: {
- headers?: Record<string, string>;
- status?: number;
- }): void;
- /**
- Converts the result to a streamed response object with a stream data part stream.
- It can be used with the `useChat` and `useCompletion` hooks.
-
- @param init Optional headers.
-
- @return A response object.
- */
- toAIStreamResponse(init?: ResponseInit): Response;
- /**
- Creates a simple text stream response.
- Each text delta is encoded as UTF-8 and sent as a separate chunk.
- Non-text-delta events are ignored.
-
- @param init Optional headers and status code.
- */
- toTextStreamResponse(init?: ResponseInit): Response;
- }
- /**
- * @deprecated Use `streamText` instead.
- */
- declare const experimental_streamText: typeof streamText;
+ Maximum number of sequential LLM calls (steps), e.g. when you use tool calls. Must be at least 1.

- /**
- Converts an array of messages from useChat into an array of CoreMessages that can be used
- with the AI core functions (e.g. `streamText`).
- */
- declare function convertToCoreMessages(messages: Array<{
- role: 'user' | 'assistant';
- content: string;
- toolInvocations?: Array<ToolResult<string, unknown, unknown>>;
- }>): CoreMessage[];
+ A maximum number is required to prevent infinite loops in the case of misconfigured tools.

- type AssistantStatus = 'in_progress' | 'awaiting_message';
- type UseAssistantOptions = {
- /**
- * The API endpoint that accepts a `{ threadId: string | null; message: string; }` object and returns an `AssistantResponse` stream.
- * The threadId refers to an existing thread with messages (or is `null` to create a new thread).
- * The message is the next message that should be appended to the thread and sent to the assistant.
+ By default, it's set to 1, which means that only a single LLM call is made.
 */
- api: string;
+ maxSteps?: number;
 /**
- * An optional string that represents the ID of an existing thread.
- * If not provided, a new thread will be created.
+ When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
+
+ By default, it's set to false.
 */
- threadId?: string;
+ experimental_continueSteps?: boolean;
 /**
- * An optional literal that sets the mode of credentials to be used on the request.
- * Defaults to "same-origin".
+ Optional telemetry configuration (experimental).
 */
- credentials?: RequestCredentials;
+ experimental_telemetry?: TelemetrySettings;
 /**
- * An optional object of headers to be passed to the API endpoint.
- */
- headers?: Record<string, string> | Headers;
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
 /**
- * An optional, additional body object to be passed to the API endpoint.
+ Limits the tools that are available for the model to call without
+ changing the tool call and result types in the result.
 */
- body?: object;
+ experimental_activeTools?: Array<keyof TOOLS>;
 /**
- * An optional callback that will be called when the assistant encounters an error.
+ Optional specification for parsing structured outputs from the LLM response.
 */
- onError?: (error: Error) => void;
- };
-
- interface FunctionCall$1 {
+ experimental_output?: Output<OUTPUT, OUTPUT_PARTIAL>;
 /**
- * The arguments to call the function with, as generated by the model in JSON
- * format. Note that the model does not always generate valid JSON, and may
- * hallucinate parameters not defined by your function schema. Validate the
- * arguments in your code before calling your function.
+ A function that attempts to repair a tool call that failed to parse.
 */
- arguments?: string;
+ experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
 /**
- * The name of the function to call.
+ Callback that is called when each step (LLM call) is finished, including intermediate steps.
+ */
+ onStepFinish?: (event: StepResult<TOOLS>) => Promise<void> | void;
+ /**
+ * Internal. For test use only. May change without notice.
 */
- name?: string;
- }
- /**
- * The tool calls generated by the model, such as function calls.
- */
- interface ToolCall {
- id: string;
- type: string;
- function: {
- name: string;
- arguments: string;
- };
- }
- /**
- * Controls which (if any) function is called by the model.
- * - none means the model will not call a function and instead generates a message.
- * - auto means the model can pick between generating a message or calling a function.
- * - Specifying a particular function via {"type: "function", "function": {"name": "my_function"}} forces the model to call that function.
- * none is the default when no functions are present. auto is the default if functions are present.
- */
- type ToolChoice = 'none' | 'auto' | {
- type: 'function';
- function: {
- name: string;
+ _internal?: {
+ generateId?: () => string;
+ currentDate?: () => Date;
 };
- };
+ }): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
+
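A sketch of multi-step tool calling with the `generateText` declaration above. `weatherTool` refers to the `tool()` sketch earlier in this file; the model ID is an assumption.

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text, steps } = await generateText({
  model: openai('gpt-4o'),
  tools: { weather: weatherTool },
  toolChoice: 'auto',
  maxSteps: 3, // allow: tool call -> tool result -> final answer
  prompt: 'What is the weather in Berlin?',
  onStepFinish(step) {
    // Called after every step, including the intermediate tool-call step.
    console.log(step.stepType, step.toolCalls.length);
  },
});

console.log(text, steps.length);
```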
 /**
- * A list of tools the model may call. Currently, only functions are supported as a tool.
- * Use this to provide a list of functions the model may generate JSON inputs for.
+ * A stream wrapper to send custom JSON-encoded data back to the client.
+ *
+ * @deprecated Please use `createDataStream`, `createDataStreamResponse`, and `pipeDataStreamToResponse` instead.
 */
- interface Tool {
- type: 'function';
- function: Function;
- }
- interface Function {
- /**
- * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
- * underscores and dashes, with a maximum length of 64.
- */
- name: string;
- /**
- * The parameters the functions accepts, described as a JSON Schema object. See the
- * [guide](/docs/guides/gpt/function-calling) for examples, and the
- * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
- * documentation about the format.
- *
- * To describe a function that accepts no parameters, provide the value
- * `{"type": "object", "properties": {}}`.
- */
- parameters: Record<string, unknown>;
- /**
- * A description of what the function does, used by the model to choose when and
- * how to call the function.
- */
- description?: string;
+ declare class StreamData {
+ private encoder;
+ private controller;
+ stream: ReadableStream<Uint8Array>;
+ private isClosed;
+ private warningTimeout;
+ constructor();
+ close(): Promise<void>;
+ append(value: JSONValue$1): void;
+ appendMessageAnnotation(value: JSONValue$1): void;
 }
- type IdGenerator = () => string;
- /**
- Tool invocations are either tool calls or tool results. For each assistant tool call,
- there is one tool invocation. While the call is in progress, the invocation is a tool call.
- Once the call is complete, the invocation is a tool result.
- */
- type ToolInvocation = ToolCall$1<string, any> | ToolResult<string, any, any>;
+
 /**
- * Shared types between the API and UI packages.
+ A result object for accessing different stream types and additional information.
 */
- interface Message$1 {
- id: string;
- tool_call_id?: string;
- createdAt?: Date;
- content: string;
- /**
- @deprecated Use AI SDK RSC instead: https://sdk.vercel.ai/docs/ai-sdk-rsc
- */
- ui?: string | JSX.Element | JSX.Element[] | null | undefined;
- role: 'system' | 'user' | 'assistant' | 'function' | 'data' | 'tool';
- /**
- *
- * If the message has a role of `function`, the `name` field is the name of the function.
- * Otherwise, the name field should not be set.
- */
- name?: string;
- /**
- * If the assistant role makes a function call, the `function_call` field
- * contains the function call name and arguments. Otherwise, the field should
- * not be set. (Deprecated and replaced by tool_calls.)
- */
- function_call?: string | FunctionCall$1;
- data?: JSONValue;
- /**
- * If the assistant role makes a tool call, the `tool_calls` field contains
- * the tool call name and arguments. Otherwise, the field should not be set.
- */
- tool_calls?: string | ToolCall[];
- /**
- * Additional message-specific information added on the server via StreamData
- */
- annotations?: JSONValue[] | undefined;
- /**
- Tool invocations (that can be tool calls or tool results, depending on whether or not the invocation has finished)
- that the assistant made as part of this message.
- */
- toolInvocations?: Array<ToolInvocation>;
- }
- type CreateMessage = Omit<Message$1, 'id'> & {
- id?: Message$1['id'];
- };
- type ChatRequest = {
- messages: Message$1[];
- options?: RequestOptions;
- functions?: Array<Function>;
- function_call?: FunctionCall$1;
- data?: Record<string, string>;
- tools?: Array<Tool>;
- tool_choice?: ToolChoice;
- };
- type FunctionCallHandler = (chatMessages: Message$1[], functionCall: FunctionCall$1) => Promise<ChatRequest | void>;
- type ToolCallHandler = (chatMessages: Message$1[], toolCalls: ToolCall[]) => Promise<ChatRequest | void>;
- type RequestOptions = {
- headers?: Record<string, string> | Headers;
- body?: object;
- };
- type ChatRequestOptions = {
- options?: RequestOptions;
- functions?: Array<Function>;
- function_call?: FunctionCall$1;
- tools?: Array<Tool>;
- tool_choice?: ToolChoice;
- data?: Record<string, string>;
- };
- type UseChatOptions = {
- /**
- * The API endpoint that accepts a `{ messages: Message[] }` object and returns
- * a stream of tokens of the AI chat response. Defaults to `/api/chat`.
- */
- api?: string;
- /**
- * A unique identifier for the chat. If not provided, a random one will be
- * generated. When provided, the `useChat` hook with the same `id` will
- * have shared states across components.
- */
- id?: string;
+ interface StreamTextResult<TOOLS extends Record<string, CoreTool>, PARTIAL_OUTPUT> {
1281
1842
  /**
1282
- * Initial messages of the chat. Useful to load an existing chat history.
1283
- */
1284
- initialMessages?: Message$1[];
1285
- /**
1286
- * Initial input of the chat.
1287
- */
1288
- initialInput?: string;
1289
- /**
1290
- * Callback function to be called when a function call is received.
1291
- * If the function returns a `ChatRequest` object, the request will be sent
1292
- * automatically to the API and will be used to update the chat.
1293
- */
1294
- experimental_onFunctionCall?: FunctionCallHandler;
1295
- /**
1296
- * Callback function to be called when a tool call is received.
1297
- * If the function returns a `ChatRequest` object, the request will be sent
1298
- * automatically to the API and will be used to update the chat.
1299
- */
1300
- experimental_onToolCall?: ToolCallHandler;
1843
+ Warnings from the model provider (e.g. unsupported settings) for the first step.
1844
+ */
1845
+ readonly warnings: Promise<CallWarning[] | undefined>;
1301
1846
  /**
1302
- * Callback function to be called when the API response is received.
1303
- */
1304
- onResponse?: (response: Response) => void | Promise<void>;
1847
+ The total token usage of the generated response.
1848
+ When there are multiple steps, the usage is the sum of all step usages.
1849
+
1850
+ Resolved when the response is finished.
1851
+ */
1852
+ readonly usage: Promise<LanguageModelUsage>;
1305
1853
  /**
1306
- * Callback function to be called when the chat is finished streaming.
1307
- */
1308
- onFinish?: (message: Message$1) => void;
1854
+ The reason why the generation finished. Taken from the last step.
1855
+
1856
+ Resolved when the response is finished.
1857
+ */
1858
+ readonly finishReason: Promise<FinishReason>;
1309
1859
  /**
1310
- * Callback function to be called when an error is encountered.
1860
+ Additional provider-specific metadata from the last step.
1861
+ Metadata is passed through from the provider to the AI SDK and
1862
+ enables provider-specific results that can be fully encapsulated in the provider.
1311
1863
  */
1312
- onError?: (error: Error) => void;
1864
+ readonly experimental_providerMetadata: Promise<ProviderMetadata | undefined>;
1313
1865
  /**
1314
- * A way to provide a function that is going to be used for ids for messages.
1315
- * If not provided nanoid is used by default.
1316
- */
1317
- generateId?: IdGenerator;
1866
+ The full text that has been generated by the last step.
1867
+
1868
+ Resolved when the response is finished.
1869
+ */
1870
+ readonly text: Promise<string>;
1318
1871
  /**
1319
- * The credentials mode to be used for the fetch request.
1320
- * Possible values are: 'omit', 'same-origin', 'include'.
1321
- * Defaults to 'same-origin'.
1322
- */
1323
- credentials?: RequestCredentials;
1872
+ The tool calls that have been executed in the last step.
1873
+
1874
+ Resolved when the response is finished.
1875
+ */
1876
+ readonly toolCalls: Promise<ToolCallUnion<TOOLS>[]>;
1324
1877
  /**
1325
- * HTTP headers to be sent with the API request.
1326
- */
1327
- headers?: Record<string, string> | Headers;
1878
+ The tool results that have been generated in the last step.
1879
+
1880
+ Resolved when the all tool executions are finished.
1881
+ */
1882
+ readonly toolResults: Promise<ToolResultUnion<TOOLS>[]>;
1328
1883
  /**
1329
- * Extra body object to be sent with the API request.
1330
- * @example
1331
- * Send a `sessionId` to the API along with the messages.
1332
- * ```js
1333
- * useChat({
1334
- * body: {
1335
- * sessionId: '123',
1336
- * }
1337
- * })
1338
- * ```
1884
+ Details for all steps.
1885
+ You can use this to get information about intermediate steps,
1886
+ such as the tool calls or the response headers.
1339
1887
  */
1340
- body?: object;
1888
+ readonly steps: Promise<Array<StepResult<TOOLS>>>;
1341
1889
  /**
1342
- * Whether to send extra message fields such as `message.id` and `message.createdAt` to the API.
1343
- * Defaults to `false`. When set to `true`, the API endpoint might need to
1344
- * handle the extra fields before forwarding the request to the AI service.
1345
- */
1346
- sendExtraMessageFields?: boolean;
1347
- /** Stream mode (default to "stream-data") */
1348
- streamMode?: 'stream-data' | 'text';
1349
- };
1350
- type UseCompletionOptions = {
1890
+ Additional request information from the last step.
1891
+ */
1892
+ readonly request: Promise<LanguageModelRequestMetadata>;
1351
1893
  /**
1352
- * The API endpoint that accepts a `{ prompt: string }` object and returns
1353
- * a stream of tokens of the AI completion response. Defaults to `/api/completion`.
1354
- */
1355
- api?: string;
1894
+ Additional response information from the last step.
1895
+ */
1896
+ readonly response: Promise<LanguageModelResponseMetadata & {
1897
+ /**
1898
+ The response messages that were generated during the call. It consists of an assistant message,
1899
+ potentially containing tool calls.
1900
+
1901
+ When there are tool results, there is an additional tool message with the tool results that are available.
1902
+ If there are tools that do not have execute functions, they are not included in the tool results and
1903
+ need to be added separately.
1904
+ */
1905
+ messages: Array<CoreAssistantMessage | CoreToolMessage>;
1906
+ }>;
1356
1907
  /**
1357
- * An unique identifier for the chat. If not provided, a random one will be
1358
- * generated. When provided, the `useChat` hook with the same `id` will
1359
- * have shared states across components.
1360
- */
1361
- id?: string;
1908
+ A text stream that returns only the generated text deltas. You can use it
1909
+ as either an AsyncIterable or a ReadableStream. When an error occurs, the
1910
+ stream will throw the error.
1911
+ */
1912
+ readonly textStream: AsyncIterableStream<string>;
1362
1913
  /**
1363
- * Initial prompt input of the completion.
- */
- initialInput?: string;
+ A stream with all events, including text deltas, tool calls, tool results, and
+ errors.
+ You can use it as either an AsyncIterable or a ReadableStream.
+ Only errors that stop the stream, such as network errors, are thrown.
+ */
+ readonly fullStream: AsyncIterableStream<TextStreamPart<TOOLS>>;
  /**
- * Initial completion result. Useful to load an existing history.
+ A stream of partial outputs. It uses the `experimental_output` specification.
  */
- initialCompletion?: string;
+ readonly experimental_partialOutputStream: AsyncIterableStream<PARTIAL_OUTPUT>;
  /**
- * Callback function to be called when the API response is received.
- */
- onResponse?: (response: Response) => void | Promise<void>;
+ Converts the result to a data stream.
+
+ @param data an optional StreamData object that will be merged into the stream.
+ @param getErrorMessage an optional function that converts an error to an error message.
+ @param sendUsage whether to send the usage information to the client. Defaults to true.
+
+ @return A data stream.
+ */
+ toDataStream(options?: {
+ data?: StreamData;
+ getErrorMessage?: (error: unknown) => string;
+ sendUsage?: boolean;
+ }): ReadableStream<Uint8Array>;
  /**
- * Callback function to be called when the completion is finished streaming.
+ * Merges the result as a data stream into another data stream.
+ *
+ * @param dataStream A data stream writer.
  */
- onFinish?: (prompt: string, completion: string) => void;
+ mergeIntoDataStream(dataStream: DataStreamWriter): void;
  /**
- * Callback function to be called when an error is encountered.
- */
- onError?: (error: Error) => void;
+ Writes data stream output to a Node.js response-like object.
+
+ @param response A Node.js response-like object (ServerResponse).
+ @param options.status The status code.
+ @param options.statusText The status text.
+ @param options.headers The headers.
+ @param options.data The stream data.
+ @param options.getErrorMessage An optional function that converts an error to an error message.
+ @param options.sendUsage Whether to send the usage information to the client. Defaults to true.
+ */
+ pipeDataStreamToResponse(response: ServerResponse, options?: ResponseInit & {
+ data?: StreamData;
+ getErrorMessage?: (error: unknown) => string;
+ sendUsage?: boolean;
+ }): void;
  /**
- * The credentials mode to be used for the fetch request.
- * Possible values are: 'omit', 'same-origin', 'include'.
- * Defaults to 'same-origin'.
- */
- credentials?: RequestCredentials;
+ Writes text delta output to a Node.js response-like object.
+ It sets a `Content-Type` header to `text/plain; charset=utf-8` and
+ writes each text delta as a separate chunk.
+
+ @param response A Node.js response-like object (ServerResponse).
+ @param init Optional headers, status code, and status text.
+ */
+ pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit): void;
  /**
- * HTTP headers to be sent with the API request.
- */
- headers?: Record<string, string> | Headers;
+ Converts the result to a streamed response object with a stream data part stream.
+ It can be used with the `useChat` and `useCompletion` hooks.
+
+ @param options.status The status code.
+ @param options.statusText The status text.
+ @param options.headers The headers.
+ @param options.data The stream data.
+ @param options.getErrorMessage An optional function that converts an error to an error message.
+ @param options.sendUsage Whether to send the usage information to the client. Defaults to true.
+
+ @return A response object.
+ */
+ toDataStreamResponse(options?: ResponseInit & {
+ data?: StreamData;
+ getErrorMessage?: (error: unknown) => string;
+ sendUsage?: boolean;
+ }): Response;
  /**
- * Extra body object to be sent with the API request.
- * @example
- * Send a `sessionId` to the API along with the prompt.
- * ```js
- * useChat({
- * body: {
- * sessionId: '123',
- * }
- * })
- * ```
- */
- body?: object;
- /** Stream mode (defaults to "stream-data") */
- streamMode?: 'stream-data' | 'text';
- };
- type JSONValue = null | string | number | boolean | {
- [x: string]: JSONValue;
- } | Array<JSONValue>;
- type AssistantMessage = {
- id: string;
- role: 'assistant';
- content: Array<{
- type: 'text';
- text: {
- value: string;
- };
- }>;
- };
- type DataMessage = {
- id?: string;
- role: 'data';
- data: JSONValue;
- };
-
- interface StreamPart<CODE extends string, NAME extends string, TYPE> {
- code: CODE;
- name: NAME;
- parse: (value: JSONValue) => {
- type: NAME;
- value: TYPE;
- };
+ Creates a simple text stream response.
+ Each text delta is encoded as UTF-8 and sent as a separate chunk.
+ Non-text-delta events are ignored.
+
+ @param init Optional headers, status code, and status text.
+ */
+ toTextStreamResponse(init?: ResponseInit): Response;
  }
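With the result shape above, a typical consumer is a route handler that forwards the stream to the client. A minimal sketch, assuming a Next.js-style `POST` handler and a hypothetical provider import (any `LanguageModel` works):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

export async function POST(req: Request) {
  const { prompt } = await req.json();

  const result = streamText({
    model: openai('gpt-4o'), // assumed model id
    prompt,
  });

  // Streams text deltas, tool calls, and usage as data stream parts.
  return result.toDataStreamResponse({ sendUsage: true });
}
```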
- declare const textStreamPart: StreamPart<'0', 'text', string>;
- declare const functionCallStreamPart: StreamPart<'1', 'function_call', {
- function_call: FunctionCall$1;
- }>;
- declare const dataStreamPart: StreamPart<'2', 'data', Array<JSONValue>>;
- declare const errorStreamPart: StreamPart<'3', 'error', string>;
- declare const assistantMessageStreamPart: StreamPart<'4', 'assistant_message', AssistantMessage>;
- declare const assistantControlDataStreamPart: StreamPart<'5', 'assistant_control_data', {
- threadId: string;
- messageId: string;
- }>;
- declare const dataMessageStreamPart: StreamPart<'6', 'data_message', DataMessage>;
- declare const toolCallsStreamPart: StreamPart<'7', 'tool_calls', {
- tool_calls: ToolCall[];
- }>;
- declare const messageAnnotationsStreamPart: StreamPart<'8', 'message_annotations', Array<JSONValue>>;
- declare const toolCallStreamPart: StreamPart<'9', 'tool_call', ToolCall$1<string, any>>;
- declare const toolResultStreamPart: StreamPart<'a', 'tool_result', ToolResult<string, any, any>>;
- type StreamParts = typeof textStreamPart | typeof functionCallStreamPart | typeof dataStreamPart | typeof errorStreamPart | typeof assistantMessageStreamPart | typeof assistantControlDataStreamPart | typeof dataMessageStreamPart | typeof toolCallsStreamPart | typeof messageAnnotationsStreamPart | typeof toolCallStreamPart | typeof toolResultStreamPart;
- /**
- * Maps the type of a stream part to its value type.
- */
- type StreamPartValueType = {
- [P in StreamParts as P['name']]: ReturnType<P['parse']>['value'];
- };
- type StreamPartType = ReturnType<typeof textStreamPart.parse> | ReturnType<typeof functionCallStreamPart.parse> | ReturnType<typeof dataStreamPart.parse> | ReturnType<typeof errorStreamPart.parse> | ReturnType<typeof assistantMessageStreamPart.parse> | ReturnType<typeof assistantControlDataStreamPart.parse> | ReturnType<typeof dataMessageStreamPart.parse> | ReturnType<typeof toolCallsStreamPart.parse> | ReturnType<typeof messageAnnotationsStreamPart.parse> | ReturnType<typeof toolCallStreamPart.parse> | ReturnType<typeof toolResultStreamPart.parse>;
- /**
- * The map of prefixes for data in the stream
- *
- * - 0: Text from the LLM response
- * - 1: (OpenAI) function_call responses
- * - 2: custom JSON added by the user using `Data`
- * - 6: (OpenAI) tool_call responses
- *
- * Example:
- * ```
- * 0:Vercel
- * 0:'s
- * 0: AI
- * 0: AI
- * 0: SDK
- * 0: is great
- * 0:!
- * 2: { "someJson": "value" }
- * 1: {"function_call": {"name": "get_current_weather", "arguments": "{\\n\\"location\\": \\"Charlottesville, Virginia\\",\\n\\"format\\": \\"celsius\\"\\n}"}}
- * 6: {"tool_call": {"id": "tool_0", "type": "function", "function": {"name": "get_current_weather", "arguments": "{\\n\\"location\\": \\"Charlottesville, Virginia\\",\\n\\"format\\": \\"celsius\\"\\n}"}}}
- *```
- */
- declare const StreamStringPrefixes: {
- readonly text: "0";
- readonly function_call: "1";
- readonly data: "2";
- readonly error: "3";
- readonly assistant_message: "4";
- readonly assistant_control_data: "5";
- readonly data_message: "6";
- readonly tool_calls: "7";
- readonly message_annotations: "8";
- readonly tool_call: "9";
- readonly tool_result: "a";
+ type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
+ type: 'text-delta';
+ textDelta: string;
+ } | ({
+ type: 'tool-call';
+ } & ToolCallUnion<TOOLS>) | {
+ type: 'tool-call-streaming-start';
+ toolCallId: string;
+ toolName: string;
+ } | {
+ type: 'tool-call-delta';
+ toolCallId: string;
+ toolName: string;
+ argsTextDelta: string;
+ } | ({
+ type: 'tool-result';
+ } & ToolResultUnion<TOOLS>) | {
+ type: 'step-finish';
+ finishReason: FinishReason;
+ logprobs?: LogProbs;
+ usage: LanguageModelUsage;
+ request: LanguageModelRequestMetadata;
+ response: LanguageModelResponseMetadata;
+ warnings: CallWarning[] | undefined;
+ experimental_providerMetadata?: ProviderMetadata;
+ isContinued: boolean;
+ } | {
+ type: 'finish';
+ finishReason: FinishReason;
+ logprobs?: LogProbs;
+ usage: LanguageModelUsage;
+ response: LanguageModelResponseMetadata;
+ experimental_providerMetadata?: ProviderMetadata;
+ } | {
+ type: 'error';
+ error: unknown;
  };
- /**
1497
- Parses a stream part from a string.
1498
2033
 
1499
- @param line The string to parse.
1500
- @returns The parsed stream part.
1501
- @throws An error if the string cannot be parsed.
1502
- */
1503
- declare const parseStreamPart: (line: string) => StreamPartType;
1504
2034
  /**
1505
- Prepends a string with a prefix from the `StreamChunkPrefixes`, JSON-ifies it,
1506
- and appends a new line.
2035
+ Generate a text and call tools for a given prompt using a language model.
2036
+
2037
+ This function streams the output. If you do not want to stream the output, use `generateText` instead.
2038
+
2039
+ @param model - The language model to use.
2040
+ @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
2041
+
2042
+ @param system - A system message that will be part of the prompt.
2043
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
2044
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
2045
+
2046
+ @param maxTokens - Maximum number of tokens to generate.
2047
+ @param temperature - Temperature setting.
2048
+ The value is passed through to the provider. The range depends on the provider and model.
2049
+ It is recommended to set either `temperature` or `topP`, but not both.
2050
+ @param topP - Nucleus sampling.
2051
+ The value is passed through to the provider. The range depends on the provider and model.
2052
+ It is recommended to set either `temperature` or `topP`, but not both.
2053
+ @param topK - Only sample from the top K options for each subsequent token.
2054
+ Used to remove "long tail" low probability responses.
2055
+ Recommended for advanced use cases only. You usually only need to use temperature.
2056
+ @param presencePenalty - Presence penalty setting.
2057
+ It affects the likelihood of the model to repeat information that is already in the prompt.
2058
+ The value is passed through to the provider. The range depends on the provider and model.
2059
+ @param frequencyPenalty - Frequency penalty setting.
2060
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
2061
+ The value is passed through to the provider. The range depends on the provider and model.
2062
+ @param stopSequences - Stop sequences.
2063
+ If set, the model will stop generating text when one of the stop sequences is generated.
2064
+ @param seed - The seed (integer) to use for random sampling.
2065
+ If set and supported by the model, calls will generate deterministic results.
1507
2066
 
1508
- It ensures type-safety for the part type and value.
1509
- */
1510
- declare function formatStreamPart<T extends keyof StreamPartValueType>(type: T, value: StreamPartValueType[T]): StreamString;
2067
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
2068
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
2069
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
1511
2070
 
1512
- /**
1513
- * Generates a 7-character random string to use for IDs. Not secure.
1514
- */
1515
- declare const generateId: (size?: number | undefined) => string;
2071
+ @param maxSteps - Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
1516
2072
 
1517
- /**
1518
- Converts a ReadableStreamDefaultReader into an async generator that yields
1519
- StreamPart objects.
2073
+ @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
2074
+ @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
2075
+ @param onFinish - Callback that is called when the LLM response and all request tool executions
2076
+ (for tools that have an `execute` function) are finished.
1520
2077
 
1521
- @param reader
1522
- Reader for the stream to read from.
1523
- @param isAborted
1524
- Optional function that returns true if the request has been aborted.
1525
- If the function returns true, the generator will stop reading the stream.
1526
- If the function is not provided, the generator will not stop reading the stream.
2078
+ @return
2079
+ A result object for accessing different stream types and additional information.
1527
2080
  */
1528
- declare function readDataStream(reader: ReadableStreamDefaultReader<Uint8Array>, { isAborted, }?: {
1529
- isAborted?: () => boolean;
1530
- }): AsyncGenerator<StreamPartType>;
1531
-
1532
- declare function createChunkDecoder(): (chunk: Uint8Array | undefined) => string;
1533
- declare function createChunkDecoder(complex: false): (chunk: Uint8Array | undefined) => string;
1534
- declare function createChunkDecoder(complex: true): (chunk: Uint8Array | undefined) => StreamPartType[];
1535
- declare function createChunkDecoder(complex?: boolean): (chunk: Uint8Array | undefined) => StreamPartType[] | string;
1536
-
1537
- declare const isStreamStringEqualToType: (type: keyof typeof StreamStringPrefixes, value: string) => value is `0:${string}\n` | `1:${string}\n` | `2:${string}\n` | `3:${string}\n` | `4:${string}\n` | `5:${string}\n` | `6:${string}\n` | `7:${string}\n` | `8:${string}\n` | `9:${string}\n` | `a:${string}\n`;
1538
- type StreamString = `${(typeof StreamStringPrefixes)[keyof typeof StreamStringPrefixes]}:${string}\n`;
1539
-
1540
- declare interface AzureChatCompletions {
1541
- id: string;
1542
- created: Date;
1543
- choices: AzureChatChoice[];
1544
- systemFingerprint?: string;
1545
- usage?: AzureCompletionsUsage;
1546
- promptFilterResults: any[];
1547
- }
1548
- declare interface AzureChatChoice {
1549
- message?: AzureChatResponseMessage;
1550
- index: number;
1551
- finishReason: string | null;
1552
- delta?: AzureChatResponseMessage;
1553
- }
1554
- declare interface AzureChatResponseMessage {
1555
- role: string;
1556
- content: string | null;
1557
- toolCalls: AzureChatCompletionsFunctionToolCall[];
1558
- functionCall?: AzureFunctionCall;
1559
- }
1560
- declare interface AzureCompletionsUsage {
1561
- completionTokens: number;
1562
- promptTokens: number;
1563
- totalTokens: number;
1564
- }
1565
- declare interface AzureFunctionCall {
1566
- name: string;
1567
- arguments: string;
1568
- }
1569
- declare interface AzureChatCompletionsFunctionToolCall {
1570
- type: 'function';
1571
- function: AzureFunctionCall;
1572
- id: string;
1573
- }
1574
-
1575
- type OpenAIStreamCallbacks = AIStreamCallbacksAndOptions & {
1576
- /**
1577
- * @example
1578
- * ```js
1579
- * const response = await openai.chat.completions.create({
1580
- * model: 'gpt-3.5-turbo-0613',
1581
- * stream: true,
1582
- * messages,
1583
- * functions,
1584
- * })
1585
- *
1586
- * const stream = OpenAIStream(response, {
1587
- * experimental_onFunctionCall: async (functionCallPayload, createFunctionCallMessages) => {
1588
- * // ... run your custom logic here
1589
- * const result = await myFunction(functionCallPayload)
1590
- *
1591
- * // Ask for another completion, or return a string to send to the client as an assistant message.
1592
- * return await openai.chat.completions.create({
1593
- * model: 'gpt-3.5-turbo-0613',
1594
- * stream: true,
1595
- * // Append the relevant "assistant" and "function" call messages
1596
- * messages: [...messages, ...createFunctionCallMessages(result)],
1597
- * functions,
1598
- * })
1599
- * }
1600
- * })
1601
- * ```
1602
- */
1603
- experimental_onFunctionCall?: (functionCallPayload: FunctionCallPayload, createFunctionCallMessages: (functionCallResult: JSONValue) => CreateMessage[]) => Promise<Response | undefined | void | string | AsyncIterableOpenAIStreamReturnTypes>;
1604
- /**
1605
- * @example
1606
- * ```js
1607
- * const response = await openai.chat.completions.create({
1608
- * model: 'gpt-3.5-turbo-1106', // or gpt-4-1106-preview
1609
- * stream: true,
1610
- * messages,
1611
- * tools,
1612
- * tool_choice: "auto", // auto is default, but we'll be explicit
1613
- * })
1614
- *
1615
- * const stream = OpenAIStream(response, {
1616
- * experimental_onToolCall: async (toolCallPayload, appendToolCallMessages) => {
1617
- * let messages: CreateMessage[] = []
1618
- * // There might be multiple tool calls, so we need to iterate through them
1619
- * for (const tool of toolCallPayload.tools) {
1620
- * // ... run your custom logic here
1621
- * const result = await myFunction(tool.function)
1622
- * // Append the relevant "assistant" and "tool" call messages
1623
- * appendToolCallMessage({tool_call_id:tool.id, function_name:tool.function.name, tool_call_result:result})
1624
- * }
1625
- * // Ask for another completion, or return a string to send to the client as an assistant message.
1626
- * return await openai.chat.completions.create({
1627
- * model: 'gpt-3.5-turbo-1106', // or gpt-4-1106-preview
1628
- * stream: true,
1629
- * // Append the results messages, calling appendToolCallMessage without
1630
- * // any arguments will jsut return the accumulated messages
1631
- * messages: [...messages, ...appendToolCallMessage()],
1632
- * tools,
1633
- * tool_choice: "auto", // auto is default, but we'll be explicit
1634
- * })
1635
- * }
1636
- * })
1637
- * ```
1638
- */
1639
- experimental_onToolCall?: (toolCallPayload: ToolCallPayload, appendToolCallMessage: (result?: {
1640
- tool_call_id: string;
1641
- function_name: string;
1642
- tool_call_result: JSONValue;
1643
- }) => CreateMessage[]) => Promise<Response | undefined | void | string | AsyncIterableOpenAIStreamReturnTypes>;
1644
- };
1645
- interface ChatCompletionChunk {
1646
- id: string;
1647
- choices: Array<ChatCompletionChunkChoice>;
1648
- created: number;
1649
- model: string;
1650
- object: string;
1651
- }
1652
- interface ChatCompletionChunkChoice {
1653
- delta: ChoiceDelta;
1654
- finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call' | null;
1655
- index: number;
1656
- }
1657
- interface ChoiceDelta {
1658
- /**
1659
- * The contents of the chunk message.
1660
- */
1661
- content?: string | null;
2081
+ declare function streamText<TOOLS extends Record<string, CoreTool>, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxSteps, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
1662
2082
  /**
1663
- * The name and arguments of a function that should be called, as generated by the
1664
- * model.
2083
+ The language model to use.
1665
2084
  */
1666
- function_call?: FunctionCall$1;
2085
+ model: LanguageModel;
1667
2086
  /**
1668
- * The role of the author of this message.
1669
- */
1670
- role?: 'system' | 'user' | 'assistant' | 'tool';
1671
- tool_calls?: Array<DeltaToolCall>;
1672
- }
1673
- interface DeltaToolCall {
1674
- index: number;
2087
+ The tools that the model can call. The model needs to support calling tools.
2088
+ */
2089
+ tools?: TOOLS;
1675
2090
  /**
1676
- * The ID of the tool call.
2091
+ The tool choice strategy. Default: 'auto'.
1677
2092
  */
1678
- id?: string;
2093
+ toolChoice?: CoreToolChoice<TOOLS>;
1679
2094
  /**
1680
- * The function that the model called.
1681
- */
1682
- function?: ToolCallFunction;
2095
+ Maximum number of sequential LLM calls (steps), e.g. when you use tool calls. Must be at least 1.
2096
+
2097
+ A maximum number is required to prevent infinite loops in the case of misconfigured tools.
2098
+
2099
+ By default, it's set to 1, which means that only a single LLM call is made.
2100
+ */
2101
+ maxSteps?: number;
1683
2102
  /**
1684
- * The type of the tool. Currently, only `function` is supported.
2103
+ When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
2104
+
2105
+ By default, it's set to false.
1685
2106
  */
1686
- type?: 'function';
1687
- }
1688
- interface ToolCallFunction {
2107
+ experimental_continueSteps?: boolean;
1689
2108
  /**
1690
- * The arguments to call the function with, as generated by the model in JSON
1691
- * format. Note that the model does not always generate valid JSON, and may
1692
- * hallucinate parameters not defined by your function schema. Validate the
1693
- * arguments in your code before calling your function.
2109
+ Optional telemetry configuration (experimental).
1694
2110
  */
1695
- arguments?: string;
2111
+ experimental_telemetry?: TelemetrySettings;
1696
2112
  /**
1697
- * The name of the function to call.
1698
- */
1699
- name?: string;
1700
- }
1701
- /**
1702
- * https://github.com/openai/openai-node/blob/3ec43ee790a2eb6a0ccdd5f25faa23251b0f9b8e/src/resources/completions.ts#L28C1-L64C1
1703
- * Completions API. Streamed and non-streamed responses are the same.
2113
+ Additional provider-specific metadata. They are passed through
2114
+ to the provider from the AI SDK and enable provider-specific
2115
+ functionality that can be fully encapsulated in the provider.
1704
2116
  */
1705
- interface Completion {
2117
+ experimental_providerMetadata?: ProviderMetadata;
1706
2118
  /**
1707
- * A unique identifier for the completion.
2119
+ Limits the tools that are available for the model to call without
2120
+ changing the tool call and result types in the result.
1708
2121
  */
1709
- id: string;
1710
- /**
1711
- * The list of completion choices the model generated for the input prompt.
1712
- */
1713
- choices: Array<CompletionChoice>;
1714
- /**
1715
- * The Unix timestamp of when the completion was created.
1716
- */
1717
- created: number;
2122
+ experimental_activeTools?: Array<keyof TOOLS>;
1718
2123
  /**
1719
- * The model used for completion.
2124
+ Optional specification for parsing structured outputs from the LLM response.
1720
2125
  */
1721
- model: string;
2126
+ experimental_output?: Output<OUTPUT, PARTIAL_OUTPUT>;
1722
2127
  /**
1723
- * The object type, which is always "text_completion"
2128
+ A function that attempts to repair a tool call that failed to parse.
1724
2129
  */
1725
- object: string;
2130
+ experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
1726
2131
  /**
1727
- * Usage statistics for the completion request.
2132
+ Enable streaming of tool call deltas as they are generated. Disabled by default.
1728
2133
  */
1729
- usage?: CompletionUsage;
1730
- }
1731
- interface CompletionChoice {
2134
+ experimental_toolCallStreaming?: boolean;
1732
2135
  /**
1733
- * The reason the model stopped generating tokens. This will be `stop` if the model
1734
- * hit a natural stop point or a provided stop sequence, or `length` if the maximum
1735
- * number of tokens specified in the request was reached.
2136
+ Optional transformation that is applied to the stream.
2137
+
2138
+ @param stopStream - A function that stops the source stream.
2139
+ @param tools - The tools that are accessible to and can be called by the model. The model needs to support calling tools.
1736
2140
  */
1737
- finish_reason: 'stop' | 'length' | 'content_filter';
1738
- index: number;
1739
- logprobs: any | null;
1740
- text: string;
1741
- }
1742
- interface CompletionUsage {
2141
+ experimental_transform?: (options: {
2142
+ tools: TOOLS;
2143
+ stopStream: () => void;
2144
+ }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
1743
2145
  /**
1744
- * Usage statistics for the completion request.
2146
+ Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
1745
2147
  */
2148
+ onChunk?: (event: {
2149
+ chunk: Extract<TextStreamPart<TOOLS>, {
2150
+ type: 'text-delta' | 'tool-call' | 'tool-call-streaming-start' | 'tool-call-delta' | 'tool-result';
2151
+ }>;
2152
+ }) => Promise<void> | void;
1746
2153
  /**
1747
- * Number of tokens in the generated completion.
2154
+ Callback that is called when the LLM response and all request tool executions
2155
+ (for tools that have an `execute` function) are finished.
2156
+
2157
+ The usage is the combined usage of all steps.
1748
2158
  */
1749
- completion_tokens: number;
2159
+ onFinish?: (event: Omit<StepResult<TOOLS>, 'stepType' | 'isContinued'> & {
2160
+ /**
2161
+ Details for all steps.
2162
+ */
2163
+ readonly steps: StepResult<TOOLS>[];
2164
+ }) => Promise<void> | void;
1750
2165
  /**
1751
- * Number of tokens in the prompt.
1752
- */
1753
- prompt_tokens: number;
2166
+ Callback that is called when each step (LLM call) is finished, including intermediate steps.
2167
+ */
2168
+ onStepFinish?: (event: StepResult<TOOLS>) => Promise<void> | void;
1754
2169
  /**
1755
- * Total number of tokens used in the request (prompt + completion).
2170
+ * Internal. For test use only. May change without notice.
1756
2171
  */
1757
- total_tokens: number;
1758
- }
1759
- type AsyncIterableOpenAIStreamReturnTypes = AsyncIterable<ChatCompletionChunk> | AsyncIterable<Completion> | AsyncIterable<AzureChatCompletions>;
1760
- declare function OpenAIStream(res: Response | AsyncIterableOpenAIStreamReturnTypes, callbacks?: OpenAIStreamCallbacks): ReadableStream;
2172
+ _internal?: {
2173
+ now?: () => number;
2174
+ generateId?: () => string;
2175
+ currentDate?: () => Date;
2176
+ };
2177
+ }): StreamTextResult<TOOLS, PARTIAL_OUTPUT>;
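A sketch of a multi-step tool-calling call against this signature. The weather tool is hypothetical, and `model`, the `tool` helper, and the zod schema are assumptions for illustration:

```ts
import { streamText, tool } from 'ai';
import { z } from 'zod';

declare const model: Parameters<typeof streamText>[0]['model']; // assumed provider model

const result = streamText({
  model,
  maxSteps: 3, // allow a tool call step plus a follow-up generation
  tools: {
    weather: tool({
      description: 'Get the weather for a city',
      parameters: z.object({ city: z.string() }),
      // Hypothetical implementation; a real tool would call an API here.
      execute: async ({ city }) => ({ city, temperatureC: 21 }),
    }),
  },
  prompt: 'What is the weather in Berlin?',
  onStepFinish(step) {
    console.log('step usage:', step.usage);
  },
});
```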

- interface FunctionCallPayload {
- name: string;
- arguments: Record<string, unknown>;
- }
- interface ToolCallPayload {
- tools: {
- id: string;
- type: 'function';
- func: {
- name: string;
- arguments: Record<string, unknown>;
- };
- }[];
- }
  /**
- * Configuration options and helper callback methods for AIStream stream lifecycle events.
- * @interface
+ * Smooths text streaming output.
+ *
+ * @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms. Can be set to `null` to skip the delay.
+ * @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default), "line" to stream line by line, or provide a custom RegExp pattern for custom chunking.
+ *
+ * @returns A transform stream that smooths text streaming output.
  */
- interface AIStreamCallbacksAndOptions {
- /** `onStart`: Called once when the stream is initialized. */
- onStart?: () => Promise<void> | void;
- /** `onCompletion`: Called for each tokenized message. */
- onCompletion?: (completion: string) => Promise<void> | void;
- /** `onFinal`: Called once when the stream is closed with the final completion message. */
- onFinal?: (completion: string) => Promise<void> | void;
- /** `onToken`: Called for each tokenized message. */
- onToken?: (token: string) => Promise<void> | void;
- /** `onText`: Called for each text chunk. */
- onText?: (text: string) => Promise<void> | void;
+ declare function smoothStream<TOOLS extends Record<string, CoreTool>>({ delayInMs, chunking, _internal: { delay }, }?: {
+ delayInMs?: number | null;
+ chunking?: 'word' | 'line' | RegExp;
  /**
- * @deprecated This flag is no longer used and only retained for backwards compatibility.
- * You can remove it from your code.
+ * Internal. For test use only. May change without notice.
  */
- experimental_streamData?: boolean;
- }
- /**
- * Options for the AIStreamParser.
- * @interface
- * @property {string} event - The event (type) from the server side event stream.
- */
- interface AIStreamParserOptions {
- event?: string;
- }
- /**
- * Custom parser for AIStream data.
- * @interface
- * @param {string} data - The data to be parsed.
- * @param {AIStreamParserOptions} options - The options for the parser.
- * @returns {string | void} The parsed data or void.
- */
- interface AIStreamParser {
- (data: string, options: AIStreamParserOptions): string | void | {
- isText: false;
- content: string;
+ _internal?: {
+ delay?: (delayInMs: number | null) => Promise<void>;
  };
- }
- /**
- * Creates a TransformStream that parses events from an EventSource stream using a custom parser.
- * @param {AIStreamParser} customParser - Function to handle event data.
- * @returns {TransformStream<Uint8Array, string>} TransformStream parsing events.
- */
- declare function createEventStreamTransformer(customParser?: AIStreamParser): TransformStream<Uint8Array, string | {
- isText: false;
- content: string;
- }>;
+ }): (options: {
+ tools: TOOLS;
+ }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
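`smoothStream` returns a transform factory of the shape that `streamText`'s `experimental_transform` option expects, so it plugs in directly. A sketch, with the model assumed to be in scope:

```ts
import { smoothStream, streamText } from 'ai';

declare const model: Parameters<typeof streamText>[0]['model']; // assumed provider model

const result = streamText({
  model,
  prompt: 'Write a short story.',
  // Re-chunk the text word by word with a 20 ms delay between chunks.
  experimental_transform: smoothStream({ delayInMs: 20, chunking: 'word' }),
});
```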
+
+ /**
+ * Experimental middleware for LanguageModelV1.
+ * This type defines the structure for middleware that can be used to modify
+ * the behavior of LanguageModelV1 operations.
+ */
+ type Experimental_LanguageModelV1Middleware = {
+ /**
+ * Transforms the parameters before they are passed to the language model.
+ * @param options - Object containing the type of operation and the parameters.
+ * @param options.type - The type of operation ('generate' or 'stream').
+ * @param options.params - The original parameters for the language model call.
+ * @returns A promise that resolves to the transformed parameters.
+ */
+ transformParams?: (options: {
+ type: 'generate' | 'stream';
+ params: LanguageModelV1CallOptions;
+ }) => PromiseLike<LanguageModelV1CallOptions>;
+ /**
+ * Wraps the generate operation of the language model.
+ * @param options - Object containing the generate function, parameters, and model.
+ * @param options.doGenerate - The original generate function.
+ * @param options.params - The parameters for the generate call. If the
+ * `transformParams` middleware is used, this will be the transformed parameters.
+ * @param options.model - The language model instance.
+ * @returns A promise that resolves to the result of the generate operation.
+ */
+ wrapGenerate?: (options: {
+ doGenerate: () => ReturnType<LanguageModelV1['doGenerate']>;
+ params: LanguageModelV1CallOptions;
+ model: LanguageModelV1;
+ }) => Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
+ /**
+ * Wraps the stream operation of the language model.
+ * @param options - Object containing the stream function, parameters, and model.
+ * @param options.doStream - The original stream function.
+ * @param options.params - The parameters for the stream call. If the
+ * `transformParams` middleware is used, this will be the transformed parameters.
+ * @param options.model - The language model instance.
+ * @returns A promise that resolves to the result of the stream operation.
+ */
+ wrapStream?: (options: {
+ doStream: () => ReturnType<LanguageModelV1['doStream']>;
+ params: LanguageModelV1CallOptions;
+ model: LanguageModelV1;
+ }) => PromiseLike<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
+ };
+
  /**
- * Creates a transform stream that encodes input messages and invokes optional callback functions.
- * The transform stream uses the provided callbacks to execute custom logic at different stages of the stream's lifecycle.
- * - `onStart`: Called once when the stream is initialized.
- * - `onToken`: Called for each tokenized message.
- * - `onCompletion`: Called every time an AIStream completion message is received. This can occur multiple times when using e.g. OpenAI functions
- * - `onFinal`: Called once when the stream is closed with the final completion message.
+ * Wraps a LanguageModelV1 instance with middleware functionality.
+ * This function allows you to apply middleware to transform parameters,
+ * wrap generate operations, and wrap stream operations of a language model.
  *
- * This function is useful when you want to process a stream of messages and perform specific actions during the stream's lifecycle.
+ * @param options - Configuration options for wrapping the language model.
+ * @param options.model - The original LanguageModelV1 instance to be wrapped.
+ * @param options.middleware - The middleware to be applied to the language model.
+ * @param options.modelId - Optional custom model ID to override the original model's ID.
+ * @param options.providerId - Optional custom provider ID to override the original model's provider.
+ * @returns A new LanguageModelV1 instance with middleware applied.
+ */
+ declare const experimental_wrapLanguageModel: ({ model, middleware: { transformParams, wrapGenerate, wrapStream }, modelId, providerId, }: {
+ model: LanguageModelV1;
+ middleware: Experimental_LanguageModelV1Middleware;
+ modelId?: string;
+ providerId?: string;
+ }) => LanguageModelV1;
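A minimal logging middleware as a sketch; it only wraps `doGenerate` and passes parameters through untouched. The wrapped model can be used anywhere a `LanguageModelV1` is accepted:

```ts
import {
  experimental_wrapLanguageModel,
  type Experimental_LanguageModelV1Middleware,
} from 'ai';
import type { LanguageModelV1 } from '@ai-sdk/provider';

const loggingMiddleware: Experimental_LanguageModelV1Middleware = {
  wrapGenerate: async ({ doGenerate, params }) => {
    console.log('doGenerate called with', JSON.stringify(params.prompt));
    const result = await doGenerate();
    console.log('generated text:', result.text);
    return result;
  },
};

declare const baseModel: LanguageModelV1; // assumed to come from a provider package

const wrappedModel = experimental_wrapLanguageModel({
  model: baseModel,
  middleware: loggingMiddleware,
});
```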
+
+ /**
+ * Creates a custom provider with specified language models, text embedding models, and an optional fallback provider.
  *
- * @param {AIStreamCallbacksAndOptions} [callbacks] - An object containing the callback functions.
- * @return {TransformStream<string, Uint8Array>} A transform stream that encodes input messages as Uint8Array and allows the execution of custom logic through callbacks.
+ * @param {Object} options - The options for creating the custom provider.
+ * @param {Record<string, LanguageModelV1>} [options.languageModels] - A record of language models, where keys are model IDs and values are LanguageModelV1 instances.
+ * @param {Record<string, EmbeddingModelV1<string>>} [options.textEmbeddingModels] - A record of text embedding models, where keys are model IDs and values are EmbeddingModelV1<string> instances.
+ * @param {Provider} [options.fallbackProvider] - An optional fallback provider to use when a requested model is not found in the custom provider.
+ * @returns {Provider} A Provider object with languageModel and textEmbeddingModel methods.
  *
- * @example
- * const callbacks = {
- * onStart: async () => console.log('Stream started'),
- * onToken: async (token) => console.log(`Token: ${token}`),
- * onCompletion: async (completion) => console.log(`Completion: ${completion}`)
- * onFinal: async () => data.close()
- * };
- * const transformer = createCallbacksTransformer(callbacks);
- */
- declare function createCallbacksTransformer(cb: AIStreamCallbacksAndOptions | OpenAIStreamCallbacks | undefined): TransformStream<string | {
- isText: false;
- content: string;
- }, Uint8Array>;
+ * @throws {NoSuchModelError} Throws when a requested model is not found and no fallback provider is available.
+ */
+ declare function experimental_customProvider({ languageModels, textEmbeddingModels, fallbackProvider, }: {
+ languageModels?: Record<string, LanguageModelV1>;
+ textEmbeddingModels?: Record<string, EmbeddingModelV1<string>>;
+ fallbackProvider?: Provider;
+ }): Provider;
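A sketch of a custom provider that aliases friendly ids to concrete models; the provider import, the model ids, and the use of the provider object as the fallback are assumptions:

```ts
import { experimental_customProvider } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

const myProvider = experimental_customProvider({
  languageModels: {
    'chat-fast': openai('gpt-4o-mini'),  // assumed model ids
    'chat-smart': openai('gpt-4o'),
  },
  // Unlisted ids fall through to the fallback, assuming it satisfies Provider.
  fallbackProvider: openai,
});

const model = myProvider.languageModel('chat-fast');
```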
+
+ declare const symbol$a: unique symbol;
+ declare class NoSuchProviderError extends NoSuchModelError {
+ private readonly [symbol$a];
+ readonly providerId: string;
+ readonly availableProviders: string[];
+ constructor({ modelId, modelType, providerId, availableProviders, message, }: {
+ modelId: string;
+ modelType: 'languageModel' | 'textEmbeddingModel';
+ providerId: string;
+ availableProviders: string[];
+ message?: string;
+ });
+ static isInstance(error: unknown): error is NoSuchProviderError;
+ }
+
  /**
- * Returns a stateful function that, when invoked, trims leading whitespace
- * from the input text. The trimming only occurs on the first invocation, ensuring that
- * subsequent calls do not alter the input text. This is particularly useful in scenarios
- * where a text stream is being processed and only the initial whitespace should be removed.
- *
- * @return {function(string): string} A function that takes a string as input and returns a string
- * with leading whitespace removed if it is the first invocation; otherwise, it returns the input unchanged.
- *
- * @example
- * const trimStart = trimStartOfStreamHelper();
- * const output1 = trimStart(" text"); // "text"
- * const output2 = trimStart(" text"); // " text"
- *
+ * Creates a registry for the given providers.
  */
- declare function trimStartOfStreamHelper(): (text: string) => string;
+ declare function experimental_createProviderRegistry(providers: Record<string, Provider>): Provider;
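The registry resolves models from `providerId:modelId` strings, where the provider id is the key in the record passed above. A sketch with two assumed provider instances:

```ts
import { experimental_createProviderRegistry } from 'ai';
import { openai } from '@ai-sdk/openai';       // assumed provider package
import { anthropic } from '@ai-sdk/anthropic'; // assumed provider package

const registry = experimental_createProviderRegistry({ openai, anthropic });

// Model ids are prefixed with the provider key from the record above.
const model = registry.languageModel('openai:gpt-4o'); // assumed model id
```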
+
  /**
- * Returns a ReadableStream created from the response, parsed and handled with custom logic.
- * The stream goes through two transformation stages, first parsing the events and then
- * invoking the provided callbacks.
+ * Calculates the cosine similarity between two vectors. This is a useful metric for
+ * comparing the similarity of two vectors such as embeddings.
  *
- * For 2xx HTTP responses:
- * - The function continues with standard stream processing.
+ * @param vector1 - The first vector.
+ * @param vector2 - The second vector.
  *
- * For non-2xx HTTP responses:
- * - If the response body is defined, it asynchronously extracts and decodes the response body.
- * - It then creates a custom ReadableStream to propagate a detailed error message.
- *
- * @param {Response} response - The response.
- * @param {AIStreamParser} customParser - The custom parser function.
- * @param {AIStreamCallbacksAndOptions} callbacks - The callbacks.
- * @return {ReadableStream} The AIStream.
- * @throws Will throw an error if the response is not OK.
+ * @returns The cosine similarity between vector1 and vector2.
+ * @throws {Error} If the vectors do not have the same length.
  */
- declare function AIStream(response: Response, customParser?: AIStreamParser, callbacks?: AIStreamCallbacksAndOptions): ReadableStream<Uint8Array>;
+ declare function cosineSimilarity(vector1: number[], vector2: number[]): number;
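Cosine similarity pairs naturally with the SDK's embedding helpers. A sketch, assuming `embedMany` and an embedding model from a provider package:

```ts
import { cosineSimilarity, embedMany } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

const { embeddings } = await embedMany({
  model: openai.embedding('text-embedding-3-small'), // assumed model id
  values: ['sunny day at the beach', 'rainy afternoon in the city'],
});

// 1 = same direction, 0 = orthogonal, -1 = opposite.
console.log(cosineSimilarity(embeddings[0], embeddings[1]));
```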
+
  /**
- * Implements ReadableStream.from(asyncIterable), which isn't documented in MDN and isn't implemented in node.
- * https://github.com/whatwg/streams/commit/8d7a0bf26eb2cc23e884ddbaac7c1da4b91cf2bc
- */
- declare function readableFromAsyncIterable<T>(iterable: AsyncIterable<T>): ReadableStream<T>;
+ * Creates a ReadableStream that emits the provided values with an optional delay between each value.
+ *
+ * @param options - The configuration options
+ * @param options.chunks - Array of values to be emitted by the stream
+ * @param options.initialDelayInMs - Optional initial delay in milliseconds before emitting the first value (default: 0). Can be set to `null` to skip the initial delay. The difference between `initialDelayInMs: null` and `initialDelayInMs: 0` is that `initialDelayInMs: null` will emit the values without any delay, while `initialDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
+ * @param options.chunkDelayInMs - Optional delay in milliseconds between emitting each value (default: 0). Can be set to `null` to skip the delay. The difference between `chunkDelayInMs: null` and `chunkDelayInMs: 0` is that `chunkDelayInMs: null` will emit the values without any delay, while `chunkDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
+ * @returns A ReadableStream that emits the provided values
+ */
+ declare function simulateReadableStream<T>({ chunks, initialDelayInMs, chunkDelayInMs, _internal, }: {
+ chunks: T[];
+ initialDelayInMs?: number | null;
+ chunkDelayInMs?: number | null;
+ _internal?: {
+ delay?: (ms: number | null) => Promise<void>;
+ };
+ }): ReadableStream<T>;
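A sketch of `simulateReadableStream` in a test, read with a standard reader:

```ts
import { simulateReadableStream } from 'ai';

const stream = simulateReadableStream({
  chunks: ['Hello', ' ', 'world'],
  initialDelayInMs: 100, // wait 100 ms before the first chunk
  chunkDelayInMs: 50,    // then 50 ms between chunks
});

const reader = stream.getReader();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  console.log(value);
}
```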
+
+ declare const symbol$9: unique symbol;
+ declare class InvalidArgumentError extends AISDKError {
+ private readonly [symbol$9];
+ readonly parameter: string;
+ readonly value: unknown;
+ constructor({ parameter, value, message, }: {
+ parameter: string;
+ value: unknown;
+ message: string;
+ });
+ static isInstance(error: unknown): error is InvalidArgumentError;
+ }

- interface CompletionChunk {
- /**
- * Unique object identifier.
- *
- * The format and length of IDs may change over time.
- */
- id: string;
- /**
- * The resulting completion up to and excluding the stop sequences.
- */
- completion: string;
+ declare const symbol$8: unique symbol;
+ /**
+ Thrown when no object could be generated. This can have several causes:
+
+ - The model failed to generate a response.
+ - The model generated a response that could not be parsed.
+ - The model generated a response that could not be validated against the schema.
+
+ The error contains the following properties:
+
+ - `text`: The text that was generated by the model. This can be the raw text or the tool call text, depending on the model.
+ */
+ declare class NoObjectGeneratedError extends AISDKError {
+ private readonly [symbol$8];
  /**
- * The model that handled the request.
+ The text that was generated by the model. This can be the raw text or the tool call text, depending on the model.
  */
- model: string;
+ readonly text: string | undefined;
  /**
- * The reason that we stopped.
- *
- * This may be one of the following values:
- *
- * - `"stop_sequence"`: we reached a stop sequence — either provided by you via the
- * `stop_sequences` parameter, or a stop sequence built into the model
- * - `"max_tokens"`: we exceeded `max_tokens_to_sample` or the model's maximum
+ The response metadata.
  */
- stop_reason: string | null;
+ readonly response: LanguageModelResponseMetadata | undefined;
  /**
- * Object type.
- *
- * For Text Completions, this is always `"completion"`.
+ The usage of the model.
  */
- type: 'completion';
- }
- interface Message {
- id: string;
- content: Array<ContentBlock>;
- model: string;
- role: 'assistant';
- stop_reason: 'end_turn' | 'max_tokens' | 'stop_sequence' | null;
- stop_sequence: string | null;
- type: 'message';
- }
- interface ContentBlock {
- text: string;
- type: 'text';
- }
- interface TextDelta {
- text: string;
- type: 'text_delta';
- }
- interface ContentBlockDeltaEvent {
- delta: TextDelta;
- index: number;
- type: 'content_block_delta';
+ readonly usage: LanguageModelUsage | undefined;
+ constructor({ message, cause, text, response, usage, }: {
+ message?: string;
+ cause?: Error;
+ text?: string;
+ response: LanguageModelResponseMetadata;
+ usage: LanguageModelUsage;
+ });
+ static isInstance(error: unknown): error is NoObjectGeneratedError;
  }
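A sketch of handling this error around a structured-output call; `generateObject`, the schema, and the in-scope model are assumptions for illustration:

```ts
import { generateObject, NoObjectGeneratedError } from 'ai';
import { z } from 'zod';

declare const model: Parameters<typeof generateObject>[0]['model']; // assumed provider model

try {
  const { object } = await generateObject({
    model,
    schema: z.object({ name: z.string() }),
    prompt: 'Generate a user profile.',
  });
  console.log(object.name);
} catch (error) {
  if (NoObjectGeneratedError.isInstance(error)) {
    // The raw model output and usage are preserved for debugging.
    console.error('no object generated:', error.text, error.usage);
  } else {
    throw error;
  }
}
```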
- interface ContentBlockStartEvent {
- content_block: ContentBlock;
- index: number;
- type: 'content_block_start';
+
+ declare const symbol$7: unique symbol;
+ /**
+ Thrown when no output type is specified and output-related methods are called.
+ */
+ declare class NoOutputSpecifiedError extends AISDKError {
+ private readonly [symbol$7];
+ constructor({ message }?: {
+ message?: string;
+ });
+ static isInstance(error: unknown): error is NoOutputSpecifiedError;
  }
- interface ContentBlockStopEvent {
- index: number;
- type: 'content_block_stop';
+
+ declare const symbol$6: unique symbol;
+ declare class ToolCallRepairError extends AISDKError {
+ private readonly [symbol$6];
+ readonly originalError: NoSuchToolError | InvalidToolArgumentsError;
+ constructor({ cause, originalError, message, }: {
+ message?: string;
+ cause: unknown;
+ originalError: NoSuchToolError | InvalidToolArgumentsError;
+ });
+ static isInstance(error: unknown): error is ToolCallRepairError;
+ }
+
+ declare const symbol$5: unique symbol;
+ declare class ToolExecutionError extends AISDKError {
+ private readonly [symbol$5];
+ readonly toolName: string;
+ readonly toolArgs: JSONValue;
+ readonly toolCallId: string;
+ constructor({ toolArgs, toolName, toolCallId, cause, message, }: {
+ message?: string;
+ toolArgs: JSONValue;
+ toolName: string;
+ toolCallId: string;
+ cause: unknown;
+ });
+ static isInstance(error: unknown): error is ToolExecutionError;
  }
- interface MessageDeltaEventDelta {
- stop_reason: 'end_turn' | 'max_tokens' | 'stop_sequence' | null;
- stop_sequence: string | null;
+
+ declare const symbol$4: unique symbol;
+ declare class InvalidDataContentError extends AISDKError {
+ private readonly [symbol$4];
+ readonly content: unknown;
+ constructor({ content, cause, message, }: {
+ content: unknown;
+ cause?: unknown;
+ message?: string;
+ });
+ static isInstance(error: unknown): error is InvalidDataContentError;
  }
- interface MessageDeltaEvent {
- delta: MessageDeltaEventDelta;
- type: 'message_delta';
+
+ declare const symbol$3: unique symbol;
+ declare class InvalidMessageRoleError extends AISDKError {
+ private readonly [symbol$3];
+ readonly role: string;
+ constructor({ role, message, }: {
+ role: string;
+ message?: string;
+ });
+ static isInstance(error: unknown): error is InvalidMessageRoleError;
  }
- type MessageStreamEvent = MessageStartEvent | MessageDeltaEvent | MessageStopEvent | ContentBlockStartEvent | ContentBlockDeltaEvent | ContentBlockStopEvent;
- interface MessageStartEvent {
- message: Message;
- type: 'message_start';
+
+ declare const symbol$2: unique symbol;
+ declare class MessageConversionError extends AISDKError {
+ private readonly [symbol$2];
+ readonly originalMessage: UIMessage;
+ constructor({ originalMessage, message, }: {
+ originalMessage: UIMessage;
+ message: string;
+ });
+ static isInstance(error: unknown): error is MessageConversionError;
  }
- interface MessageStopEvent {
- type: 'message_stop';
+
+ declare const symbol$1: unique symbol;
+ declare class DownloadError extends AISDKError {
+ private readonly [symbol$1];
+ readonly url: string;
+ readonly statusCode?: number;
+ readonly statusText?: string;
+ constructor({ url, statusCode, statusText, cause, message, }: {
+ url: string;
+ statusCode?: number;
+ statusText?: string;
+ message?: string;
+ cause?: unknown;
+ });
+ static isInstance(error: unknown): error is DownloadError;
+ }
+
+ declare const symbol: unique symbol;
+ type RetryErrorReason = 'maxRetriesExceeded' | 'errorNotRetryable' | 'abort';
+ declare class RetryError extends AISDKError {
+ private readonly [symbol];
+ readonly reason: RetryErrorReason;
+ readonly lastError: unknown;
+ readonly errors: Array<unknown>;
+ constructor({ message, reason, errors, }: {
+ message: string;
+ reason: RetryErrorReason;
+ errors: Array<unknown>;
+ });
+ static isInstance(error: unknown): error is RetryError;
  }
- /**
- * Accepts either a fetch Response from the Anthropic `POST /v1/complete` endpoint,
- * or the return value of `await client.completions.create({ stream: true })`
- * from the `@anthropic-ai/sdk` package.
- */
- declare function AnthropicStream(res: Response | AsyncIterable<CompletionChunk> | AsyncIterable<MessageStreamEvent>, cb?: AIStreamCallbacksAndOptions): ReadableStream;

  /**
  You can pass the thread and the latest message into the `AssistantResponse`. This establishes the context for the response.
@@ -1995,14 +2502,6 @@ The process parameter is a callback in which you can run the assistant on thread
  */
  type AssistantResponseCallback = (options: {
  /**
- @deprecated use variable from outer scope instead.
- */
- threadId: string;
- /**
- @deprecated use variable from outer scope instead.
- */
- messageId: string;
- /**
  Forwards an assistant message (non-streaming) to the client.
  */
  sendMessage: (message: AssistantMessage) => void;
@@ -2013,7 +2512,7 @@ type AssistantResponseCallback = (options: {
  /**
  Forwards the assistant response stream to the client. Returns the `Run` object after it completes, or when it requires an action.
  */
- forwardStream: (stream: AssistantStream) => Promise<Run | undefined>;
+ forwardStream: (stream: any) => Promise<any | undefined>;
  }) => Promise<void>;
  /**
  The `AssistantResponse` allows you to send a stream of assistant updates to `useAssistant`.
@@ -2021,65 +2520,26 @@ It is designed to facilitate streaming assistant responses to the `useAssistant`
  It receives an assistant thread and a current message, and can send messages and data messages to the client.
  */
  declare function AssistantResponse({ threadId, messageId }: AssistantResponseSettings, process: AssistantResponseCallback): Response;
+
  /**
- @deprecated Use `AssistantResponse` instead.
+ * Configuration options and helper callback methods for stream lifecycle events.
  */
- declare const experimental_AssistantResponse: typeof AssistantResponse;
-
- interface AWSBedrockResponse {
- body?: AsyncIterable<{
- chunk?: {
- bytes?: Uint8Array;
- };
- }>;
- }
- declare function AWSBedrockAnthropicMessagesStream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
- declare function AWSBedrockAnthropicStream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
- declare function AWSBedrockCohereStream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
- declare function AWSBedrockLlama2Stream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
- declare function AWSBedrockStream(response: AWSBedrockResponse, callbacks: AIStreamCallbacksAndOptions | undefined, extractTextDeltaFromChunk: (chunk: any) => string): ReadableStream<any>;
-
- interface StreamChunk {
- text?: string;
- eventType: 'stream-start' | 'search-queries-generation' | 'search-results' | 'text-generation' | 'citation-generation' | 'stream-end';
- }
- declare function CohereStream(reader: Response | AsyncIterable<StreamChunk>, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
-
- interface GenerateContentResponse {
- candidates?: GenerateContentCandidate[];
- }
- interface GenerateContentCandidate {
- index: number;
- content: Content;
- }
- interface Content {
- role: string;
- parts: Part[];
- }
- type Part = TextPart | InlineDataPart;
- interface InlineDataPart {
- text?: never;
- }
- interface TextPart {
- text: string;
- inlineData?: never;
+ interface StreamCallbacks {
+ /** `onStart`: Called once when the stream is initialized. */
+ onStart?: () => Promise<void> | void;
+ /**
+ * `onCompletion`: Called for each tokenized message.
+ *
+ * @deprecated Use `onFinal` instead.
+ */
+ onCompletion?: (completion: string) => Promise<void> | void;
+ /** `onFinal`: Called once when the stream is closed with the final completion message. */
+ onFinal?: (completion: string) => Promise<void> | void;
+ /** `onToken`: Called for each tokenized message. */
+ onToken?: (token: string) => Promise<void> | void;
+ /** `onText`: Called for each text chunk. */
+ onText?: (text: string) => Promise<void> | void;
  }
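A sketch of a `StreamCallbacks` object, as accepted by the adapter functions below. The type import is an assumption about how the interface is exposed:

```ts
import type { StreamCallbacks } from 'ai'; // assumed export; shape matches the interface above

const callbacks: StreamCallbacks = {
  onStart: () => console.log('stream started'),
  onText: text => process.stdout.write(text),
  onFinal: completion => console.log('\ndone, length:', completion.length),
};
```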
- declare function GoogleGenerativeAIStream(response: {
- stream: AsyncIterable<GenerateContentResponse>;
- }, cb?: AIStreamCallbacksAndOptions): ReadableStream;
-
- declare function HuggingFaceStream(res: AsyncGenerator<any>, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
-
- type InkeepOnFinalMetadata = {
- chat_session_id: string;
- records_cited: any;
- };
- type InkeepChatResultCallbacks = {
- onFinal?: (completion: string, metadata?: InkeepOnFinalMetadata) => Promise<void> | void;
- onRecordsCited?: (records_cited: InkeepOnFinalMetadata['records_cited']) => void;
- };
- type InkeepAIStreamCallbacksAndOptions = AIStreamCallbacksAndOptions & InkeepChatResultCallbacks;
- declare function InkeepStream(res: Response, callbacks?: InkeepAIStreamCallbacksAndOptions): ReadableStream;

  type LangChainImageDetail = 'auto' | 'low' | 'high';
  type LangChainMessageContentText = {
@@ -2102,186 +2562,59 @@ type LangChainMessageContent = string | LangChainMessageContentComplex[];
  type LangChainAIMessageChunk = {
  content: LangChainMessageContent;
  };
+ type LangChainStreamEvent = {
+ event: string;
+ data: any;
+ };
  /**
- Converts the result of a LangChain Expression Language stream invocation to an AIStream.
+ Converts LangChain output streams to an AI SDK Data Stream.
+
+ The following streams are supported:
+ - `LangChainAIMessageChunk` streams (LangChain `model.stream` output)
+ - `string` streams (LangChain `StringOutputParser` output)
  */
- declare function toAIStream(stream: ReadableStream<LangChainAIMessageChunk>, callbacks?: AIStreamCallbacksAndOptions): ReadableStream<any>;
+ declare function toDataStream$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, callbacks?: StreamCallbacks): ReadableStream<Uint8Array>;
+ declare function toDataStreamResponse$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, options?: {
+ init?: ResponseInit;
+ data?: StreamData;
+ callbacks?: StreamCallbacks;
+ }): Response;
+ declare function mergeIntoDataStream$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, options: {
+ dataStream: DataStreamWriter;
+ callbacks?: StreamCallbacks;
+ }): void;

- declare const langchainAdapter_toAIStream: typeof toAIStream;
  declare namespace langchainAdapter {
  export {
- langchainAdapter_toAIStream as toAIStream,
+ mergeIntoDataStream$1 as mergeIntoDataStream,
+ toDataStream$1 as toDataStream,
+ toDataStreamResponse$1 as toDataStreamResponse,
  };
  }
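A sketch of the adapter in a route handler. The `LangChainAdapter` export name, the LangChain import, and the model id are assumptions:

```ts
import { LangChainAdapter } from 'ai'; // assumed public name of the namespace above
import { ChatOpenAI } from '@langchain/openai'; // assumed LangChain package

export async function POST(req: Request) {
  const { prompt } = await req.json();

  const model = new ChatOpenAI({ model: 'gpt-4o-mini' }); // assumed model id
  const stream = await model.stream(prompt); // LangChainAIMessageChunk stream

  return LangChainAdapter.toDataStreamResponse(stream, {
    callbacks: { onFinal: completion => console.log('length:', completion.length) },
  });
}
```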
2116
2594
 
- /**
- @deprecated Use LangChainAdapter.toAIStream() instead.
- */
- declare function LangChainStream(callbacks?: AIStreamCallbacksAndOptions): {
- stream: ReadableStream<any>;
- writer: WritableStreamDefaultWriter<any>;
- handlers: {
- handleLLMNewToken: (token: string) => Promise<void>;
- handleLLMStart: (_llm: any, _prompts: string[], runId: string) => Promise<void>;
- handleLLMEnd: (_output: any, runId: string) => Promise<void>;
- handleLLMError: (e: Error, runId: string) => Promise<void>;
- handleChainStart: (_chain: any, _inputs: any, runId: string) => Promise<void>;
- handleChainEnd: (_outputs: any, runId: string) => Promise<void>;
- handleChainError: (e: Error, runId: string) => Promise<void>;
- handleToolStart: (_tool: any, _input: string, runId: string) => Promise<void>;
- handleToolEnd: (_output: string, runId: string) => Promise<void>;
- handleToolError: (e: Error, runId: string) => Promise<void>;
- };
- };
-
- interface ChatCompletionResponseChunk {
- id: string;
- object: 'chat.completion.chunk';
- created: number;
- model: string;
- choices: ChatCompletionResponseChunkChoice[];
- }
- interface ChatCompletionResponseChunkChoice {
- index: number;
- delta: {
- role?: string;
- content?: string;
- tool_calls?: ToolCalls[];
- };
- finish_reason: string;
- }
- interface FunctionCall {
- name: string;
- arguments: string;
- }
- interface ToolCalls {
- id: 'null';
- type: 'function';
- function: FunctionCall;
- }
- declare function MistralStream(response: AsyncGenerator<ChatCompletionResponseChunk, void, unknown>, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
-
- interface Prediction {
- id: string;
- status: 'starting' | 'processing' | 'succeeded' | 'failed' | 'canceled';
- version: string;
- input: object;
- output?: any;
- source: 'api' | 'web';
- error?: any;
- logs?: string;
- metrics?: {
- predict_time?: number;
- };
- webhook?: string;
- webhook_events_filter?: ('start' | 'output' | 'logs' | 'completed')[];
- created_at: string;
- updated_at?: string;
- completed_at?: string;
- urls: {
- get: string;
- cancel: string;
- stream?: string;
- };
- }
- /**
- * Stream predictions from Replicate.
- * Only certain models are supported and you must pass `stream: true` to
- * replicate.predictions.create().
- * @see https://github.com/replicate/replicate-javascript#streaming
- *
- * @example
- * const response = await replicate.predictions.create({
- *   stream: true,
- *   input: {
- *     prompt: messages.join('\n')
- *   },
- *   version: '2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1'
- * })
- *
- * const stream = await ReplicateStream(response)
- * return new StreamingTextResponse(stream)
- *
- */
- declare function ReplicateStream(res: Prediction, cb?: AIStreamCallbacksAndOptions, options?: {
- headers?: Record<string, string>;
- }): Promise<ReadableStream>;
-
- /**
- * A stream wrapper to send custom JSON-encoded data back to the client.
- */
- declare class StreamData {
- private encoder;
- private controller;
- stream: ReadableStream<Uint8Array>;
- private isClosed;
- private warningTimeout;
- constructor();
- close(): Promise<void>;
- append(value: JSONValue): void;
- appendMessageAnnotation(value: JSONValue): void;
- }
- /**
- * A TransformStream for LLMs that do not have their own transform stream handlers managing encoding (e.g. OpenAIStream has one for function call handling).
- * This assumes every chunk is a 'text' chunk.
- */
- declare function createStreamDataTransformer(): TransformStream<any, any>;
- /**
- @deprecated Use `StreamData` instead.
- */
- declare class experimental_StreamData extends StreamData {
- }
-
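Editorial note: `StreamData` survives this diff (it still appears in the new export list at the end of the file). A minimal sketch of its lifecycle, with hypothetical payload values; it is typically passed as the `data` option of the response helpers above.

import { StreamData } from 'ai';

const data = new StreamData();
data.append({ status: 'retrieving context' });      // arbitrary JSONValue payload
data.appendMessageAnnotation({ source: 'doc-42' }); // hypothetical annotation

// The stream must be closed explicitly once the response is done;
// the private warningTimeout above presumably flags forgotten close() calls.
await data.close();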
- /**
- * A utility function to stream a ReadableStream to a Node.js response-like object.
- */
- declare function streamToResponse(res: ReadableStream, response: ServerResponse, init?: {
- headers?: Record<string, string>;
- status?: number;
- }, data?: StreamData): void;
-
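Editorial note: a sketch of the removed `streamToResponse` helper on a plain Node.js server; the inline stream is a stand-in for real model output.

import { createServer } from 'node:http';
import { streamToResponse } from 'ai'; // present only on the old side of this diff

createServer((req, res) => {
  // A trivial ReadableStream standing in for an LLM text stream.
  const stream = new ReadableStream({
    start(controller) {
      controller.enqueue(new TextEncoder().encode('hello from the stream\n'));
      controller.close();
    },
  });
  streamToResponse(stream, res, { status: 200 });
}).listen(3000);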
- /**
- * This is a naive implementation of the streaming React response API.
- * Currently, it can carry the original raw content, data payload and a special
- * UI payload and stream them via "rows" (nested promises).
- * It must be used inside Server Actions so Flight can encode the React elements.
- *
- * It is naive as unlike the StreamingTextResponse, it does not send the diff
- * between the rows, but flushing the full payload on each row.
- */
-
- type UINode = string | JSX.Element | JSX.Element[] | null | undefined;
- type Payload = {
- ui: UINode | Promise<UINode>;
- content: string;
- };
- /**
- @deprecated Use AI SDK RSC instead: https://sdk.vercel.ai/docs/ai-sdk-rsc
- */
- type ReactResponseRow = Payload & {
- next: null | Promise<ReactResponseRow>;
+ type EngineResponse = {
+ delta: string;
  };
- /**
- A utility class for streaming React responses.
-
- @deprecated Use AI SDK RSC instead: https://sdk.vercel.ai/docs/ai-sdk-rsc
- */
- declare class experimental_StreamingReactResponse {
- constructor(res: ReadableStream, options?: {
- ui?: (message: {
- content: string;
- data?: JSONValue[];
- }) => UINode | Promise<UINode>;
- data?: StreamData;
- generateId?: IdGenerator;
- });
- }
-
- /**
- * A utility class for streaming text responses.
- */
- declare class StreamingTextResponse extends Response {
- constructor(res: ReadableStream, init?: ResponseInit, data?: StreamData);
+ declare function toDataStream(stream: AsyncIterable<EngineResponse>, callbacks?: StreamCallbacks): ReadableStream<Uint8Array>;
+ declare function toDataStreamResponse(stream: AsyncIterable<EngineResponse>, options?: {
+ init?: ResponseInit;
+ data?: StreamData;
+ callbacks?: StreamCallbacks;
+ }): Response;
+ declare function mergeIntoDataStream(stream: AsyncIterable<EngineResponse>, options: {
+ dataStream: DataStreamWriter;
+ callbacks?: StreamCallbacks;
+ }): void;
+
+ declare const llamaindexAdapter_mergeIntoDataStream: typeof mergeIntoDataStream;
+ declare const llamaindexAdapter_toDataStream: typeof toDataStream;
+ declare const llamaindexAdapter_toDataStreamResponse: typeof toDataStreamResponse;
+ declare namespace llamaindexAdapter {
+ export {
+ llamaindexAdapter_mergeIntoDataStream as mergeIntoDataStream,
+ llamaindexAdapter_toDataStream as toDataStream,
+ llamaindexAdapter_toDataStreamResponse as toDataStreamResponse,
+ };
  }
 
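Editorial note: a minimal usage sketch of the new LlamaIndexAdapter declared above, assuming the `llamaindex` package's `SimpleChatEngine`; the model id and request shape are illustrative only, not part of this diff.

import { OpenAI, SimpleChatEngine } from 'llamaindex';
import { LlamaIndexAdapter } from 'ai';

export async function POST(req: Request) {
  const { prompt } = await req.json();

  const engine = new SimpleChatEngine({
    llm: new OpenAI({ model: 'gpt-4o-mini' }), // illustrative model id
  });

  // With stream: true, chat() returns an async iterable of { delta } chunks,
  // matching the EngineResponse shape declared above.
  const stream = await engine.chat({ message: prompt, stream: true });

  return LlamaIndexAdapter.toDataStreamResponse(stream);
}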
- export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, AssistantStatus, CallWarning, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataMessage, DeepPartial, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolInvocation, ToolResultPart, UseAssistantOptions, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, convertToCoreMessages, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, generateObject, generateText, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamObject, streamResponse, streamText, streamToResponse, tool, trimStartOfStreamHelper };
+ export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createDataStream, createDataStreamResponse, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, smoothStream, streamObject, streamText, tool };