ai 5.0.0-canary.20 → 5.0.0-canary.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -1,494 +1,12 @@
1
- import { ToolCall, ToolResult, ToolResultContent, Schema, IdGenerator, FetchFunction } from '@ai-sdk/provider-utils';
1
+ import { ToolResultContent, Schema, ToolCall, ToolResult, IdGenerator, FetchFunction } from '@ai-sdk/provider-utils';
2
2
  export { IdGenerator, Schema, ToolCall, ToolResult, asSchema, createIdGenerator, generateId, jsonSchema } from '@ai-sdk/provider-utils';
3
- import { z } from 'zod';
4
- import { ServerResponse } from 'node:http';
5
3
  import { AISDKError, SharedV2ProviderMetadata, SharedV2ProviderOptions, EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV2, ImageModelV2CallWarning, ImageModelV2ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2CallWarning, LanguageModelV2Source, SpeechModelV1, SpeechModelV1CallWarning, TranscriptionModelV1, TranscriptionModelV1CallWarning, LanguageModelV2Usage, JSONObject, LanguageModelV2ToolCall, JSONSchema7, LanguageModelV2CallOptions, JSONParseError, TypeValidationError, LanguageModelV2Middleware, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
6
4
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
5
+ import { z } from 'zod';
6
+ import { ServerResponse } from 'node:http';
7
7
  import { AttributeValue, Tracer } from '@opentelemetry/api';
8
8
  import { ServerResponse as ServerResponse$1 } from 'http';
9
9
 
10
- declare const dataStreamPartSchema: z.ZodDiscriminatedUnion<"type", [z.ZodObject<{
11
- type: z.ZodLiteral<"text">;
12
- value: z.ZodString;
13
- }, "strip", z.ZodTypeAny, {
14
- type: "text";
15
- value: string;
16
- }, {
17
- type: "text";
18
- value: string;
19
- }>, z.ZodObject<{
20
- type: z.ZodLiteral<"data">;
21
- value: z.ZodArray<z.ZodAny, "many">;
22
- }, "strip", z.ZodTypeAny, {
23
- type: "data";
24
- value: any[];
25
- }, {
26
- type: "data";
27
- value: any[];
28
- }>, z.ZodObject<{
29
- type: z.ZodLiteral<"error">;
30
- value: z.ZodString;
31
- }, "strip", z.ZodTypeAny, {
32
- type: "error";
33
- value: string;
34
- }, {
35
- type: "error";
36
- value: string;
37
- }>, z.ZodObject<{
38
- type: z.ZodLiteral<"message-annotations">;
39
- value: z.ZodArray<z.ZodAny, "many">;
40
- }, "strip", z.ZodTypeAny, {
41
- type: "message-annotations";
42
- value: any[];
43
- }, {
44
- type: "message-annotations";
45
- value: any[];
46
- }>, z.ZodObject<{
47
- type: z.ZodLiteral<"tool-call">;
48
- value: z.ZodObject<{
49
- toolCallId: z.ZodString;
50
- toolName: z.ZodString;
51
- args: z.ZodUnknown;
52
- }, "strip", z.ZodTypeAny, {
53
- toolCallId: string;
54
- toolName: string;
55
- args?: unknown;
56
- }, {
57
- toolCallId: string;
58
- toolName: string;
59
- args?: unknown;
60
- }>;
61
- }, "strip", z.ZodTypeAny, {
62
- type: "tool-call";
63
- value: {
64
- toolCallId: string;
65
- toolName: string;
66
- args?: unknown;
67
- };
68
- }, {
69
- type: "tool-call";
70
- value: {
71
- toolCallId: string;
72
- toolName: string;
73
- args?: unknown;
74
- };
75
- }>, z.ZodObject<{
76
- type: z.ZodLiteral<"tool-result">;
77
- value: z.ZodObject<{
78
- toolCallId: z.ZodString;
79
- result: z.ZodUnknown;
80
- providerMetadata: z.ZodOptional<z.ZodAny>;
81
- }, "strip", z.ZodTypeAny, {
82
- toolCallId: string;
83
- result?: unknown;
84
- providerMetadata?: any;
85
- }, {
86
- toolCallId: string;
87
- result?: unknown;
88
- providerMetadata?: any;
89
- }>;
90
- }, "strip", z.ZodTypeAny, {
91
- type: "tool-result";
92
- value: {
93
- toolCallId: string;
94
- result?: unknown;
95
- providerMetadata?: any;
96
- };
97
- }, {
98
- type: "tool-result";
99
- value: {
100
- toolCallId: string;
101
- result?: unknown;
102
- providerMetadata?: any;
103
- };
104
- }>, z.ZodObject<{
105
- type: z.ZodLiteral<"tool-call-streaming-start">;
106
- value: z.ZodObject<{
107
- toolCallId: z.ZodString;
108
- toolName: z.ZodString;
109
- }, "strip", z.ZodTypeAny, {
110
- toolCallId: string;
111
- toolName: string;
112
- }, {
113
- toolCallId: string;
114
- toolName: string;
115
- }>;
116
- }, "strip", z.ZodTypeAny, {
117
- type: "tool-call-streaming-start";
118
- value: {
119
- toolCallId: string;
120
- toolName: string;
121
- };
122
- }, {
123
- type: "tool-call-streaming-start";
124
- value: {
125
- toolCallId: string;
126
- toolName: string;
127
- };
128
- }>, z.ZodObject<{
129
- type: z.ZodLiteral<"tool-call-delta">;
130
- value: z.ZodObject<{
131
- toolCallId: z.ZodString;
132
- argsTextDelta: z.ZodString;
133
- }, "strip", z.ZodTypeAny, {
134
- toolCallId: string;
135
- argsTextDelta: string;
136
- }, {
137
- toolCallId: string;
138
- argsTextDelta: string;
139
- }>;
140
- }, "strip", z.ZodTypeAny, {
141
- type: "tool-call-delta";
142
- value: {
143
- toolCallId: string;
144
- argsTextDelta: string;
145
- };
146
- }, {
147
- type: "tool-call-delta";
148
- value: {
149
- toolCallId: string;
150
- argsTextDelta: string;
151
- };
152
- }>, z.ZodObject<{
153
- type: z.ZodLiteral<"finish-message">;
154
- value: z.ZodObject<{
155
- finishReason: z.ZodEnum<["stop", "length", "tool-calls", "content-filter", "other", "error", "unknown"]>;
156
- usage: z.ZodOptional<z.ZodObject<{
157
- inputTokens: z.ZodOptional<z.ZodNumber>;
158
- outputTokens: z.ZodOptional<z.ZodNumber>;
159
- totalTokens: z.ZodOptional<z.ZodNumber>;
160
- reasoningTokens: z.ZodOptional<z.ZodNumber>;
161
- cachedInputTokens: z.ZodOptional<z.ZodNumber>;
162
- }, "strip", z.ZodTypeAny, {
163
- inputTokens?: number | undefined;
164
- outputTokens?: number | undefined;
165
- totalTokens?: number | undefined;
166
- reasoningTokens?: number | undefined;
167
- cachedInputTokens?: number | undefined;
168
- }, {
169
- inputTokens?: number | undefined;
170
- outputTokens?: number | undefined;
171
- totalTokens?: number | undefined;
172
- reasoningTokens?: number | undefined;
173
- cachedInputTokens?: number | undefined;
174
- }>>;
175
- }, "strip", z.ZodTypeAny, {
176
- finishReason: "length" | "stop" | "tool-calls" | "content-filter" | "other" | "error" | "unknown";
177
- usage?: {
178
- inputTokens?: number | undefined;
179
- outputTokens?: number | undefined;
180
- totalTokens?: number | undefined;
181
- reasoningTokens?: number | undefined;
182
- cachedInputTokens?: number | undefined;
183
- } | undefined;
184
- }, {
185
- finishReason: "length" | "stop" | "tool-calls" | "content-filter" | "other" | "error" | "unknown";
186
- usage?: {
187
- inputTokens?: number | undefined;
188
- outputTokens?: number | undefined;
189
- totalTokens?: number | undefined;
190
- reasoningTokens?: number | undefined;
191
- cachedInputTokens?: number | undefined;
192
- } | undefined;
193
- }>;
194
- }, "strip", z.ZodTypeAny, {
195
- type: "finish-message";
196
- value: {
197
- finishReason: "length" | "stop" | "tool-calls" | "content-filter" | "other" | "error" | "unknown";
198
- usage?: {
199
- inputTokens?: number | undefined;
200
- outputTokens?: number | undefined;
201
- totalTokens?: number | undefined;
202
- reasoningTokens?: number | undefined;
203
- cachedInputTokens?: number | undefined;
204
- } | undefined;
205
- };
206
- }, {
207
- type: "finish-message";
208
- value: {
209
- finishReason: "length" | "stop" | "tool-calls" | "content-filter" | "other" | "error" | "unknown";
210
- usage?: {
211
- inputTokens?: number | undefined;
212
- outputTokens?: number | undefined;
213
- totalTokens?: number | undefined;
214
- reasoningTokens?: number | undefined;
215
- cachedInputTokens?: number | undefined;
216
- } | undefined;
217
- };
218
- }>, z.ZodObject<{
219
- type: z.ZodLiteral<"finish-step">;
220
- value: z.ZodObject<{
221
- isContinued: z.ZodBoolean;
222
- finishReason: z.ZodEnum<["stop", "length", "tool-calls", "content-filter", "other", "error", "unknown"]>;
223
- usage: z.ZodOptional<z.ZodObject<{
224
- inputTokens: z.ZodOptional<z.ZodNumber>;
225
- outputTokens: z.ZodOptional<z.ZodNumber>;
226
- totalTokens: z.ZodOptional<z.ZodNumber>;
227
- reasoningTokens: z.ZodOptional<z.ZodNumber>;
228
- cachedInputTokens: z.ZodOptional<z.ZodNumber>;
229
- }, "strip", z.ZodTypeAny, {
230
- inputTokens?: number | undefined;
231
- outputTokens?: number | undefined;
232
- totalTokens?: number | undefined;
233
- reasoningTokens?: number | undefined;
234
- cachedInputTokens?: number | undefined;
235
- }, {
236
- inputTokens?: number | undefined;
237
- outputTokens?: number | undefined;
238
- totalTokens?: number | undefined;
239
- reasoningTokens?: number | undefined;
240
- cachedInputTokens?: number | undefined;
241
- }>>;
242
- }, "strip", z.ZodTypeAny, {
243
- finishReason: "length" | "stop" | "tool-calls" | "content-filter" | "other" | "error" | "unknown";
244
- isContinued: boolean;
245
- usage?: {
246
- inputTokens?: number | undefined;
247
- outputTokens?: number | undefined;
248
- totalTokens?: number | undefined;
249
- reasoningTokens?: number | undefined;
250
- cachedInputTokens?: number | undefined;
251
- } | undefined;
252
- }, {
253
- finishReason: "length" | "stop" | "tool-calls" | "content-filter" | "other" | "error" | "unknown";
254
- isContinued: boolean;
255
- usage?: {
256
- inputTokens?: number | undefined;
257
- outputTokens?: number | undefined;
258
- totalTokens?: number | undefined;
259
- reasoningTokens?: number | undefined;
260
- cachedInputTokens?: number | undefined;
261
- } | undefined;
262
- }>;
263
- }, "strip", z.ZodTypeAny, {
264
- type: "finish-step";
265
- value: {
266
- finishReason: "length" | "stop" | "tool-calls" | "content-filter" | "other" | "error" | "unknown";
267
- isContinued: boolean;
268
- usage?: {
269
- inputTokens?: number | undefined;
270
- outputTokens?: number | undefined;
271
- totalTokens?: number | undefined;
272
- reasoningTokens?: number | undefined;
273
- cachedInputTokens?: number | undefined;
274
- } | undefined;
275
- };
276
- }, {
277
- type: "finish-step";
278
- value: {
279
- finishReason: "length" | "stop" | "tool-calls" | "content-filter" | "other" | "error" | "unknown";
280
- isContinued: boolean;
281
- usage?: {
282
- inputTokens?: number | undefined;
283
- outputTokens?: number | undefined;
284
- totalTokens?: number | undefined;
285
- reasoningTokens?: number | undefined;
286
- cachedInputTokens?: number | undefined;
287
- } | undefined;
288
- };
289
- }>, z.ZodObject<{
290
- type: z.ZodLiteral<"start-step">;
291
- value: z.ZodObject<{
292
- messageId: z.ZodString;
293
- }, "strip", z.ZodTypeAny, {
294
- messageId: string;
295
- }, {
296
- messageId: string;
297
- }>;
298
- }, "strip", z.ZodTypeAny, {
299
- type: "start-step";
300
- value: {
301
- messageId: string;
302
- };
303
- }, {
304
- type: "start-step";
305
- value: {
306
- messageId: string;
307
- };
308
- }>, z.ZodObject<{
309
- type: z.ZodLiteral<"reasoning">;
310
- value: z.ZodObject<{
311
- text: z.ZodString;
312
- providerMetadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
313
- }, "strip", z.ZodTypeAny, {
314
- text: string;
315
- providerMetadata?: Record<string, any> | undefined;
316
- }, {
317
- text: string;
318
- providerMetadata?: Record<string, any> | undefined;
319
- }>;
320
- }, "strip", z.ZodTypeAny, {
321
- type: "reasoning";
322
- value: {
323
- text: string;
324
- providerMetadata?: Record<string, any> | undefined;
325
- };
326
- }, {
327
- type: "reasoning";
328
- value: {
329
- text: string;
330
- providerMetadata?: Record<string, any> | undefined;
331
- };
332
- }>, z.ZodObject<{
333
- type: z.ZodLiteral<"source">;
334
- value: z.ZodObject<{
335
- type: z.ZodLiteral<"source">;
336
- sourceType: z.ZodLiteral<"url">;
337
- id: z.ZodString;
338
- url: z.ZodString;
339
- title: z.ZodOptional<z.ZodString>;
340
- providerMetadata: z.ZodOptional<z.ZodAny>;
341
- }, "strip", z.ZodTypeAny, {
342
- type: "source";
343
- sourceType: "url";
344
- url: string;
345
- id: string;
346
- providerMetadata?: any;
347
- title?: string | undefined;
348
- }, {
349
- type: "source";
350
- sourceType: "url";
351
- url: string;
352
- id: string;
353
- providerMetadata?: any;
354
- title?: string | undefined;
355
- }>;
356
- }, "strip", z.ZodTypeAny, {
357
- type: "source";
358
- value: {
359
- type: "source";
360
- sourceType: "url";
361
- url: string;
362
- id: string;
363
- providerMetadata?: any;
364
- title?: string | undefined;
365
- };
366
- }, {
367
- type: "source";
368
- value: {
369
- type: "source";
370
- sourceType: "url";
371
- url: string;
372
- id: string;
373
- providerMetadata?: any;
374
- title?: string | undefined;
375
- };
376
- }>, z.ZodObject<{
377
- type: z.ZodLiteral<"file">;
378
- value: z.ZodObject<{
379
- url: z.ZodString;
380
- mediaType: z.ZodString;
381
- }, "strip", z.ZodTypeAny, {
382
- url: string;
383
- mediaType: string;
384
- }, {
385
- url: string;
386
- mediaType: string;
387
- }>;
388
- }, "strip", z.ZodTypeAny, {
389
- type: "file";
390
- value: {
391
- url: string;
392
- mediaType: string;
393
- };
394
- }, {
395
- type: "file";
396
- value: {
397
- url: string;
398
- mediaType: string;
399
- };
400
- }>, z.ZodObject<{
401
- type: z.ZodLiteral<"reasoning-part-finish">;
402
- value: z.ZodNull;
403
- }, "strip", z.ZodTypeAny, {
404
- type: "reasoning-part-finish";
405
- value: null;
406
- }, {
407
- type: "reasoning-part-finish";
408
- value: null;
409
- }>]>;
410
- type DataStreamPart = z.infer<typeof dataStreamPartSchema>;
411
-
412
- interface DataStreamWriter {
413
- /**
414
- * Appends a data stream part to the stream.
415
- */
416
- write(part: DataStreamPart): void;
417
- /**
418
- * Merges the contents of another stream to this stream.
419
- */
420
- merge(stream: ReadableStream<DataStreamPart>): void;
421
- /**
422
- * Error handler that is used by the data stream writer.
423
- * This is intended for forwarding when merging streams
424
- * to prevent duplicated error masking.
425
- */
426
- onError: ((error: unknown) => string) | undefined;
427
- }
428
-
429
- declare function createDataStream({ execute, onError, }: {
430
- execute: (writer: DataStreamWriter) => Promise<void> | void;
431
- onError?: (error: unknown) => string;
432
- }): ReadableStream<DataStreamPart>;
433
-
434
- declare function createDataStreamResponse({ status, statusText, headers, dataStream, }: ResponseInit & {
435
- dataStream: ReadableStream<DataStreamPart>;
436
- }): Response;
437
-
438
- declare function pipeDataStreamToResponse({ response, status, statusText, headers, dataStream, }: {
439
- response: ServerResponse;
440
- dataStream: ReadableStream<DataStreamPart>;
441
- } & ResponseInit): void;
442
-
443
- declare function processDataStream({ stream, onTextPart, onReasoningPart, onReasoningPartFinish, onSourcePart, onFilePart, onDataPart, onErrorPart, onToolCallStreamingStartPart, onToolCallDeltaPart, onToolCallPart, onToolResultPart, onMessageAnnotationsPart, onFinishMessagePart, onFinishStepPart, onStartStepPart, }: {
444
- stream: ReadableStream<Uint8Array>;
445
- onTextPart?: (streamPart: (DataStreamPart & {
446
- type: 'text';
447
- })['value']) => Promise<void> | void;
448
- onReasoningPart?: (streamPart: (DataStreamPart & {
449
- type: 'reasoning';
450
- })['value']) => Promise<void> | void;
451
- onReasoningPartFinish?: (streamPart: (DataStreamPart & {
452
- type: 'reasoning-part-finish';
453
- })['value']) => Promise<void> | void;
454
- onFilePart?: (streamPart: (DataStreamPart & {
455
- type: 'file';
456
- })['value']) => Promise<void> | void;
457
- onSourcePart?: (streamPart: (DataStreamPart & {
458
- type: 'source';
459
- })['value']) => Promise<void> | void;
460
- onDataPart?: (streamPart: (DataStreamPart & {
461
- type: 'data';
462
- })['value']) => Promise<void> | void;
463
- onErrorPart?: (streamPart: (DataStreamPart & {
464
- type: 'error';
465
- })['value']) => Promise<void> | void;
466
- onToolCallStreamingStartPart?: (streamPart: (DataStreamPart & {
467
- type: 'tool-call-streaming-start';
468
- })['value']) => Promise<void> | void;
469
- onToolCallDeltaPart?: (streamPart: (DataStreamPart & {
470
- type: 'tool-call-delta';
471
- })['value']) => Promise<void> | void;
472
- onToolCallPart?: (streamPart: ToolCall<string, any>) => Promise<void> | void;
473
- onToolResultPart?: (streamPart: ToolResult<string, any, any>) => Promise<void> | void;
474
- onMessageAnnotationsPart?: (streamPart: (DataStreamPart & {
475
- type: 'message-annotations';
476
- })['value']) => Promise<void> | void;
477
- onFinishMessagePart?: (streamPart: (DataStreamPart & {
478
- type: 'finish-message';
479
- })['value']) => Promise<void> | void;
480
- onFinishStepPart?: (streamPart: (DataStreamPart & {
481
- type: 'finish-step';
482
- })['value']) => Promise<void> | void;
483
- onStartStepPart?: (streamPart: (DataStreamPart & {
484
- type: 'start-step';
485
- })['value']) => Promise<void> | void;
486
- }): Promise<void>;
487
-
488
- declare class JsonToSseTransformStream extends TransformStream<unknown, string> {
489
- constructor();
490
- }
491
-
492
10
  declare const symbol$f: unique symbol;
493
11
  declare class InvalidArgumentError extends AISDKError {
494
12
  private readonly [symbol$f];
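The hunk above removes the data stream surface (`dataStreamPartSchema`, `DataStreamWriter`, `createDataStream`, `createDataStreamResponse`, `pipeDataStreamToResponse`, `processDataStream`, and the first `JsonToSseTransformStream` declaration); its UI message stream replacement is added further down in this file. A minimal server-side sketch of the renamed helpers, assuming they are exported from the package root and keep the shape of their predecessors (the route handler framing is illustrative):

```ts
import {
  createUIMessageStream,
  createUIMessageStreamResponse,
  type UIMessageStreamWriter,
} from 'ai';

// Replaces the removed createDataStream({ execute }) + createDataStreamResponse({ dataStream }) pair.
export function POST(): Response {
  const stream = createUIMessageStream({
    execute: (writer: UIMessageStreamWriter) => {
      // Parts follow the UIMessageStreamPart union added later in this diff.
      writer.write({ type: 'text', value: 'Hello from the UI message stream.' });
    },
    // onError turns thrown errors into the string that is forwarded to the client.
    onError: error => (error instanceof Error ? error.message : 'An error occurred.'),
  });

  return createUIMessageStreamResponse({ status: 200, stream });
}
```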
@@ -1156,26 +674,26 @@ declare const JSONRPCErrorSchema: z.ZodObject<{
1156
674
  message: z.ZodString;
1157
675
  data: z.ZodOptional<z.ZodUnknown>;
1158
676
  }, "strip", z.ZodTypeAny, {
1159
- code: number;
1160
677
  message: string;
678
+ code: number;
1161
679
  data?: unknown;
1162
680
  }, {
1163
- code: number;
1164
681
  message: string;
682
+ code: number;
1165
683
  data?: unknown;
1166
684
  }>;
1167
685
  }, "strict", z.ZodTypeAny, {
1168
686
  error: {
1169
- code: number;
1170
687
  message: string;
688
+ code: number;
1171
689
  data?: unknown;
1172
690
  };
1173
691
  id: string | number;
1174
692
  jsonrpc: "2.0";
1175
693
  }, {
1176
694
  error: {
1177
- code: number;
1178
695
  message: string;
696
+ code: number;
1179
697
  data?: unknown;
1180
698
  };
1181
699
  id: string | number;
@@ -1290,26 +808,26 @@ declare const JSONRPCMessageSchema: z.ZodUnion<[z.ZodObject<z.objectUtil.extendS
1290
808
  message: z.ZodString;
1291
809
  data: z.ZodOptional<z.ZodUnknown>;
1292
810
  }, "strip", z.ZodTypeAny, {
1293
- code: number;
1294
811
  message: string;
812
+ code: number;
1295
813
  data?: unknown;
1296
814
  }, {
1297
- code: number;
1298
815
  message: string;
816
+ code: number;
1299
817
  data?: unknown;
1300
818
  }>;
1301
819
  }, "strict", z.ZodTypeAny, {
1302
820
  error: {
1303
- code: number;
1304
821
  message: string;
822
+ code: number;
1305
823
  data?: unknown;
1306
824
  };
1307
825
  id: string | number;
1308
826
  jsonrpc: "2.0";
1309
827
  }, {
1310
828
  error: {
1311
- code: number;
1312
829
  message: string;
830
+ code: number;
1313
831
  data?: unknown;
1314
832
  };
1315
833
  id: string | number;
@@ -2228,6 +1746,39 @@ type ToolCallUnion<TOOLS extends ToolSet> = ValueOf<{
2228
1746
  }>;
2229
1747
  type ToolCallArray<TOOLS extends ToolSet> = Array<ToolCallUnion<TOOLS>>;
2230
1748
 
1749
+ type ToToolsWithDefinedExecute<TOOLS extends ToolSet> = {
1750
+ [K in keyof TOOLS as TOOLS[K]['execute'] extends undefined ? never : K]: TOOLS[K];
1751
+ };
1752
+ type ToToolResultObject<TOOLS extends ToolSet> = ValueOf<{
1753
+ [NAME in keyof TOOLS]: {
1754
+ type: 'tool-result';
1755
+ toolCallId: string;
1756
+ toolName: NAME & string;
1757
+ args: TOOLS[NAME] extends Tool<infer P> ? P : never;
1758
+ result: Awaited<ReturnType<Exclude<TOOLS[NAME]['execute'], undefined>>>;
1759
+ };
1760
+ }>;
1761
+ type ToolResultUnion<TOOLS extends ToolSet> = ToToolResultObject<ToToolsWithDefinedExecute<TOOLS>>;
1762
+ type ToolResultArray<TOOLS extends ToolSet> = Array<ToolResultUnion<TOOLS>>;
1763
+
1764
+ type ContentPart<TOOLS extends ToolSet> = {
1765
+ type: 'text';
1766
+ text: string;
1767
+ } | {
1768
+ type: 'reasoning';
1769
+ text: string;
1770
+ providerMetadata?: ProviderMetadata;
1771
+ } | ({
1772
+ type: 'source';
1773
+ } & Source) | {
1774
+ type: 'file';
1775
+ file: GeneratedFile;
1776
+ } | ({
1777
+ type: 'tool-call';
1778
+ } & ToolCallUnion<TOOLS>) | ({
1779
+ type: 'tool-result';
1780
+ } & ToolResultUnion<TOOLS>);
1781
+
2231
1782
  declare const symbol$e: unique symbol;
2232
1783
  declare class InvalidToolArgumentsError extends AISDKError {
2233
1784
  private readonly [symbol$e];
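The added block derives tool-call and tool-result unions from a `ToolSet`: `ToToolsWithDefinedExecute` drops tools without an `execute` function, so `ToolResultUnion` only covers tools that can actually produce results on the server. The key-remapping pattern in isolation, as a standalone sketch (the `weather`/`clientOnly` names are made up and the types below are not the SDK's own):

```ts
// The same key-filtering pattern used by ToToolsWithDefinedExecute:
// keys whose `execute` property is typed as undefined are remapped to `never`
// and therefore dropped from the resulting object type.
type WithDefinedExecute<T extends Record<string, { execute?: unknown }>> = {
  [K in keyof T as T[K]['execute'] extends undefined ? never : K]: T[K];
};

// Hypothetical tool set: `weather` executes on the server, `clientOnly` does not.
type ExampleTools = {
  weather: { execute: (args: { city: string }) => Promise<{ tempC: number }> };
  clientOnly: { execute?: undefined };
};

// Resolves to { weather: ... } only, which is why ToolResultUnion never
// includes results for tools without an execute function.
type ServerSideTools = WithDefinedExecute<ExampleTools>;
```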
@@ -2261,15 +1812,11 @@ type CallSettings = {
2261
1812
  */
2262
1813
  maxOutputTokens?: number;
2263
1814
  /**
2264
- Temperature setting. This is a number between 0 (almost no randomness) and
2265
- 1 (very random).
1815
+ Temperature setting. The range depends on the provider and model.
2266
1816
 
2267
1817
  It is recommended to set either `temperature` or `topP`, but not both.
2268
- Use `null` to use the provider's default temperature.
2269
-
2270
- @default 0
2271
1818
  */
2272
- temperature?: number | null;
1819
+ temperature?: number;
2273
1820
  /**
2274
1821
  Nucleus sampling. This is a number between 0 and 1.
2275
1822
 
@@ -2377,41 +1924,12 @@ type ToolCallRepairFunction<TOOLS extends ToolSet> = (options: {
2377
1924
  error: NoSuchToolError | InvalidToolArgumentsError;
2378
1925
  }) => Promise<LanguageModelV2ToolCall | null>;
2379
1926
 
2380
- type ToToolsWithDefinedExecute<TOOLS extends ToolSet> = {
2381
- [K in keyof TOOLS as TOOLS[K]['execute'] extends undefined ? never : K]: TOOLS[K];
2382
- };
2383
- type ToToolResultObject<TOOLS extends ToolSet> = ValueOf<{
2384
- [NAME in keyof TOOLS]: {
2385
- type: 'tool-result';
2386
- toolCallId: string;
2387
- toolName: NAME & string;
2388
- args: TOOLS[NAME] extends Tool<infer P> ? P : never;
2389
- result: Awaited<ReturnType<Exclude<TOOLS[NAME]['execute'], undefined>>>;
2390
- };
2391
- }>;
2392
- type ToolResultUnion<TOOLS extends ToolSet> = ToToolResultObject<ToToolsWithDefinedExecute<TOOLS>>;
2393
- type ToolResultArray<TOOLS extends ToolSet> = Array<ToolResultUnion<TOOLS>>;
2394
-
2395
- type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
1927
+ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = ContentPart<TOOLS> | {
2396
1928
  type: 'stream-start';
2397
1929
  warnings: LanguageModelV2CallWarning[];
2398
- } | {
2399
- type: 'text';
2400
- text: string;
2401
- } | {
2402
- type: 'reasoning';
2403
- text: string;
2404
- providerMetadata?: ProviderMetadata;
2405
1930
  } | {
2406
1931
  type: 'reasoning-part-finish';
2407
1932
  } | {
2408
- type: 'file';
2409
- file: GeneratedFile;
2410
- } | ({
2411
- type: 'source';
2412
- } & Source) | ({
2413
- type: 'tool-call';
2414
- } & ToolCallUnion<TOOLS>) | {
2415
1933
  type: 'tool-call-streaming-start';
2416
1934
  toolCallId: string;
2417
1935
  toolName: string;
@@ -2420,9 +1938,7 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
2420
1938
  toolCallId: string;
2421
1939
  toolName: string;
2422
1940
  argsTextDelta: string;
2423
- } | ({
2424
- type: 'tool-result';
2425
- } & ToolResultUnion<TOOLS>) | {
1941
+ } | {
2426
1942
  type: 'response-metadata';
2427
1943
  id?: string;
2428
1944
  timestamp?: Date;
@@ -2609,23 +2125,19 @@ type ToolInvocation = ({
2609
2125
  /**
2610
2126
  * AI SDK UI Messages. They are used in the client and to communicate between the frontend and the API routes.
2611
2127
  */
2612
- interface UIMessage {
2128
+ interface UIMessage<METADATA = unknown> {
2613
2129
  /**
2614
2130
  A unique identifier for the message.
2615
2131
  */
2616
2132
  id: string;
2617
2133
  /**
2618
- The timestamp of the message.
2619
- */
2620
- createdAt?: Date;
2621
- /**
2622
2134
  The role of the message.
2623
2135
  */
2624
2136
  role: 'system' | 'user' | 'assistant';
2625
2137
  /**
2626
- Additional message-specific information added on the server via StreamData
2138
+ The metadata of the message.
2627
2139
  */
2628
- annotations?: JSONValue$1[] | undefined;
2140
+ metadata?: METADATA;
2629
2141
  /**
2630
2142
  The parts of the message. Use this for rendering the message in the UI.
2631
2143
 
@@ -2710,8 +2222,8 @@ type FileUIPart = {
2710
2222
  type StepStartUIPart = {
2711
2223
  type: 'step-start';
2712
2224
  };
2713
- type CreateUIMessage = Omit<UIMessage, 'id'> & {
2714
- id?: UIMessage['id'];
2225
+ type CreateUIMessage<METADATA = unknown> = Omit<UIMessage<METADATA>, 'id'> & {
2226
+ id?: UIMessage<METADATA>['id'];
2715
2227
  };
2716
2228
 
2717
2229
  declare const symbol$3: unique symbol;
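`UIMessage` becomes generic over a `METADATA` type parameter: the `createdAt` and `annotations` fields are gone, replaced by a single optional, typed `metadata` field, and `CreateUIMessage` carries the same parameter. A small sketch of typing messages with custom metadata, assuming both types remain exported from the package root (the `MyMetadata` shape is a made-up example):

```ts
import type { CreateUIMessage, UIMessage } from 'ai';

// Hypothetical per-message metadata; any serializable shape works.
type MyMetadata = { durationMs: number; model: string };

// `metadata` replaces the removed `createdAt` and `annotations` fields.
type ChatMessage = UIMessage<MyMetadata>;
type NewChatMessage = CreateUIMessage<MyMetadata>; // identical, but `id` is optional

export function logTiming(message: ChatMessage): void {
  // `metadata` is optional, so it may be undefined if the server sent none.
  console.log(message.metadata?.durationMs ?? 'no timing metadata');
}
```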
@@ -2773,138 +2285,7 @@ declare function pipeTextStreamToResponse({ response, status, statusText, header
2773
2285
  declare function appendClientMessage({ messages, message, }: {
2774
2286
  messages: UIMessage[];
2775
2287
  message: UIMessage;
2776
- }): UIMessage[];
2777
-
2778
- type ContentPart<TOOLS extends ToolSet> = {
2779
- type: 'text';
2780
- text: string;
2781
- } | {
2782
- type: 'reasoning';
2783
- text: string;
2784
- providerMetadata?: ProviderMetadata;
2785
- } | ({
2786
- type: 'source';
2787
- } & Source) | {
2788
- type: 'file';
2789
- file: GeneratedFile;
2790
- } | ({
2791
- type: 'tool-call';
2792
- } & ToolCallUnion<TOOLS>) | ({
2793
- type: 'tool-result';
2794
- } & ToolResultUnion<TOOLS>);
2795
-
2796
- /**
2797
- A message that was generated during the generation process.
2798
- It can be either an assistant message or a tool message.
2799
- */
2800
- type ResponseMessage = (AssistantModelMessage | ToolModelMessage) & {
2801
- /**
2802
- Message ID generated by the AI SDK.
2803
- */
2804
- id: string;
2805
- };
2806
- /**
2807
- * The result of a single step in the generation process.
2808
- */
2809
- type StepResult<TOOLS extends ToolSet> = {
2810
- /**
2811
- The content that was generated in the last step.
2812
- */
2813
- readonly content: Array<ContentPart<TOOLS>>;
2814
- /**
2815
- The generated text.
2816
- */
2817
- readonly text: string;
2818
- /**
2819
- The reasoning that was generated during the generation.
2820
- */
2821
- readonly reasoning: Array<ReasoningPart>;
2822
- /**
2823
- The reasoning text that was generated during the generation.
2824
- */
2825
- readonly reasoningText: string | undefined;
2826
- /**
2827
- The files that were generated during the generation.
2828
- */
2829
- readonly files: Array<GeneratedFile>;
2830
- /**
2831
- The sources that were used to generate the text.
2832
- */
2833
- readonly sources: Array<Source>;
2834
- /**
2835
- The tool calls that were made during the generation.
2836
- */
2837
- readonly toolCalls: ToolCallArray<TOOLS>;
2838
- /**
2839
- The results of the tool calls.
2840
- */
2841
- readonly toolResults: ToolResultArray<TOOLS>;
2842
- /**
2843
- The reason why the generation finished.
2844
- */
2845
- readonly finishReason: FinishReason;
2846
- /**
2847
- The token usage of the generated text.
2848
- */
2849
- readonly usage: LanguageModelUsage;
2850
- /**
2851
- Warnings from the model provider (e.g. unsupported settings).
2852
- */
2853
- readonly warnings: CallWarning[] | undefined;
2854
- /**
2855
- Additional request information.
2856
- */
2857
- readonly request: LanguageModelRequestMetadata;
2858
- /**
2859
- Additional response information.
2860
- */
2861
- readonly response: LanguageModelResponseMetadata & {
2862
- /**
2863
- The response messages that were generated during the call.
2864
- Response messages can be either assistant messages or tool messages.
2865
- They contain a generated id.
2866
- */
2867
- readonly messages: Array<ResponseMessage>;
2868
- /**
2869
- Response body (available only for providers that use HTTP requests).
2870
- */
2871
- body?: unknown;
2872
- };
2873
- /**
2874
- Additional provider-specific metadata. They are passed through
2875
- from the provider to the AI SDK and enable provider-specific
2876
- results that can be fully encapsulated in the provider.
2877
- */
2878
- readonly providerMetadata: ProviderMetadata | undefined;
2879
- /**
2880
- The type of step that this result is for. The first step is always
2881
- an "initial" step, and subsequent steps are either "continue" steps
2882
- or "tool-result" steps.
2883
- */
2884
- readonly stepType: 'initial' | 'continue' | 'tool-result';
2885
- /**
2886
- True when there will be a continuation step with a continuation text.
2887
- */
2888
- readonly isContinued: boolean;
2889
- };
2890
-
2891
- /**
2892
- * Appends the ResponseMessage[] from the response to a Message[] (for useChat).
2893
- * The messages are converted to Messages before being appended.
2894
- * Timestamps are generated for the new messages.
2895
- *
2896
- * @returns A new Message[] with the response messages appended.
2897
- */
2898
- declare function appendResponseMessages({ messages, responseMessages, _internal: { currentDate }, }: {
2899
- messages: UIMessage[];
2900
- responseMessages: ResponseMessage[];
2901
- /**
2902
- Internal. For test use only. May change without notice.
2903
- */
2904
- _internal?: {
2905
- currentDate?: () => Date;
2906
- };
2907
- }): UIMessage[];
2288
+ }): UIMessage<unknown>[];
2908
2289
 
2909
2290
  type ChatRequestOptions = {
2910
2291
  /**
@@ -2915,16 +2296,13 @@ type ChatRequestOptions = {
2915
2296
  Additional body JSON properties that should be sent to the API endpoint.
2916
2297
  */
2917
2298
  body?: object;
2299
+ };
2300
+ type UseChatOptions<MESSAGE_METADATA = unknown> = {
2918
2301
  /**
2919
- Additional data to be sent to the API endpoint.
2920
- */
2921
- data?: JSONValue$1;
2922
- /**
2923
- * Allow submitting an empty message. Defaults to `false`.
2302
+ * Schema for the message metadata. Validates the message metadata.
2303
+ * Message metadata can be undefined or must match the schema.
2924
2304
  */
2925
- allowEmptySubmit?: boolean;
2926
- };
2927
- type UseChatOptions = {
2305
+ messageMetadataSchema?: Schema<MESSAGE_METADATA>;
2928
2306
  /**
2929
2307
  * The API endpoint that accepts a `{ messages: Message[] }` object and returns
2930
2308
  * a stream of tokens of the AI chat response. Defaults to `/api/chat`.
@@ -2939,7 +2317,7 @@ type UseChatOptions = {
2939
2317
  /**
2940
2318
  * Initial messages of the chat. Useful to load an existing chat history.
2941
2319
  */
2942
- initialMessages?: UIMessage[];
2320
+ initialMessages?: UIMessage<NoInfer<MESSAGE_METADATA>>[];
2943
2321
  /**
2944
2322
  * Initial input of the chat.
2945
2323
  */
@@ -2954,20 +2332,13 @@ type UseChatOptions = {
2954
2332
  onToolCall?: ({ toolCall, }: {
2955
2333
  toolCall: ToolCall<string, unknown>;
2956
2334
  }) => void | Promise<unknown> | unknown;
2957
- /**
2958
- * Callback function to be called when the API response is received.
2959
- */
2960
- onResponse?: (response: Response) => void | Promise<void>;
2961
2335
  /**
2962
2336
  * Optional callback function that is called when the assistant message is finished streaming.
2963
2337
  *
2964
2338
  * @param message The message that was streamed.
2965
- * @param options.usage The token usage of the message.
2966
- * @param options.finishReason The finish reason of the message.
2967
2339
  */
2968
- onFinish?: (message: UIMessage, options: {
2969
- usage: LanguageModelUsage;
2970
- finishReason: LanguageModelV2FinishReason;
2340
+ onFinish?: (options: {
2341
+ message: UIMessage<NoInfer<MESSAGE_METADATA>>;
2971
2342
  }) => void;
2972
2343
  /**
2973
2344
  * Callback function to be called when an error is encountered.
@@ -3002,9 +2373,9 @@ type UseChatOptions = {
3002
2373
  */
3003
2374
  body?: object;
3004
2375
  /**
3005
- Streaming protocol that is used. Defaults to `data`.
2376
+ Streaming protocol that is used. Defaults to `ui-message`.
3006
2377
  */
3007
- streamProtocol?: 'data' | 'text';
2378
+ streamProtocol?: 'ui-message' | 'text';
3008
2379
  /**
3009
2380
  Custom fetch implementation. You can use it as a middleware to intercept requests,
3010
2381
  or to provide a custom fetch implementation for e.g. testing.
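These hunks rework the chat options: `UseChatOptions` gains a `MESSAGE_METADATA` type parameter and a `messageMetadataSchema` to validate it, `onFinish` now receives only the finished message, `onResponse`, `data`, and `allowEmptySubmit` are removed, and the default stream protocol changes from `data` to `ui-message`. A hedged sketch of the new option shape, assuming `UseChatOptions` and the re-exported `jsonSchema` helper are available from the package root (the endpoint and metadata shape are illustrative):

```ts
import { jsonSchema } from 'ai';
import type { UseChatOptions } from 'ai';

// Hypothetical metadata the server attaches to each assistant message.
type MyMetadata = { model?: string; durationMs?: number };

export const chatOptions: UseChatOptions<MyMetadata> = {
  api: '/api/chat',
  // Validates `message.metadata` received over the wire.
  messageMetadataSchema: jsonSchema<MyMetadata>({
    type: 'object',
    properties: {
      model: { type: 'string' },
      durationMs: { type: 'number' },
    },
  }),
  // 'ui-message' is the new default; the old 'data' protocol is gone.
  streamProtocol: 'ui-message',
  // onFinish now receives only the finished message; usage and finishReason were dropped.
  onFinish: ({ message }) => {
    console.log('finished', message.id, message.metadata?.model);
  },
};
```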
@@ -3022,30 +2393,27 @@ type UseChatOptions = {
3022
2393
  };
3023
2394
 
3024
2395
  declare const getOriginalFetch$1: () => typeof fetch;
3025
- declare function callChatApi({ api, body, streamProtocol, credentials, headers, abortController, onResponse, onUpdate, onFinish, onToolCall, generateId, fetch, lastMessage, getCurrentDate, requestType, }: {
2396
+ declare function callChatApi<MESSAGE_METADATA>({ api, body, streamProtocol, credentials, headers, abortController, onUpdate, onFinish, onToolCall, generateId, fetch, lastMessage, requestType, messageMetadataSchema, }: {
3026
2397
  api: string;
3027
2398
  body: Record<string, any>;
3028
- streamProtocol: 'data' | 'text' | undefined;
2399
+ streamProtocol: 'ui-message' | 'text' | undefined;
3029
2400
  credentials: RequestCredentials | undefined;
3030
2401
  headers: HeadersInit | undefined;
3031
2402
  abortController: (() => AbortController | null) | undefined;
3032
- onResponse: ((response: Response) => void | Promise<void>) | undefined;
3033
2403
  onUpdate: (options: {
3034
- message: UIMessage;
3035
- data: JSONValue$1[] | undefined;
3036
- replaceLastMessage: boolean;
2404
+ message: UIMessage<MESSAGE_METADATA>;
3037
2405
  }) => void;
3038
- onFinish: UseChatOptions['onFinish'];
3039
- onToolCall: UseChatOptions['onToolCall'];
2406
+ onFinish: UseChatOptions<MESSAGE_METADATA>['onFinish'];
2407
+ onToolCall: UseChatOptions<MESSAGE_METADATA>['onToolCall'];
3040
2408
  generateId: IdGenerator;
3041
2409
  fetch: ReturnType<typeof getOriginalFetch$1> | undefined;
3042
- lastMessage: UIMessage | undefined;
3043
- getCurrentDate: () => Date;
2410
+ lastMessage: UIMessage<MESSAGE_METADATA> | undefined;
3044
2411
  requestType?: 'generate' | 'resume';
2412
+ messageMetadataSchema?: Schema<MESSAGE_METADATA>;
3045
2413
  }): Promise<void>;
3046
2414
 
3047
2415
  declare const getOriginalFetch: () => typeof fetch;
3048
- declare function callCompletionApi({ api, prompt, credentials, headers, body, streamProtocol, setCompletion, setLoading, setError, setAbortController, onResponse, onFinish, onError, onData, fetch, }: {
2416
+ declare function callCompletionApi({ api, prompt, credentials, headers, body, streamProtocol, setCompletion, setLoading, setError, setAbortController, onFinish, onError, fetch, }: {
3049
2417
  api: string;
3050
2418
  prompt: string;
3051
2419
  credentials: RequestCredentials | undefined;
@@ -3056,10 +2424,8 @@ declare function callCompletionApi({ api, prompt, credentials, headers, body, st
3056
2424
  setLoading: (loading: boolean) => void;
3057
2425
  setError: (error: Error | undefined) => void;
3058
2426
  setAbortController: (abortController: AbortController | null) => void;
3059
- onResponse: ((response: Response) => void | Promise<void>) | undefined;
3060
2427
  onFinish: ((prompt: string, completion: string) => void) | undefined;
3061
2428
  onError: ((error: Error) => void) | undefined;
3062
- onData: ((data: JSONValue$1[]) => void) | undefined;
3063
2429
  fetch: ReturnType<typeof getOriginalFetch> | undefined;
3064
2430
  }): Promise<string | null | undefined>;
3065
2431
 
@@ -3146,10 +2512,6 @@ type UseCompletionOptions = {
3146
2512
  * Initial completion result. Useful to load an existing history.
3147
2513
  */
3148
2514
  initialCompletion?: string;
3149
- /**
3150
- * Callback function to be called when the API response is received.
3151
- */
3152
- onResponse?: (response: Response) => void | Promise<void>;
3153
2515
  /**
3154
2516
  * Callback function to be called when the completion is finished streaming.
3155
2517
  */
@@ -3186,11 +2548,374 @@ type UseCompletionOptions = {
3186
2548
  */
3187
2549
  streamProtocol?: 'data' | 'text';
3188
2550
  /**
3189
- Custom fetch implementation. You can use it as a middleware to intercept requests,
3190
- or to provide a custom fetch implementation for e.g. testing.
3191
- */
3192
- fetch?: FetchFunction;
3193
- };
2551
+ Custom fetch implementation. You can use it as a middleware to intercept requests,
2552
+ or to provide a custom fetch implementation for e.g. testing.
2553
+ */
2554
+ fetch?: FetchFunction;
2555
+ };
2556
+
2557
+ declare const uiMessageStreamPartSchema: z.ZodDiscriminatedUnion<"type", [z.ZodObject<{
2558
+ type: z.ZodLiteral<"text">;
2559
+ value: z.ZodString;
2560
+ }, "strip", z.ZodTypeAny, {
2561
+ value: string;
2562
+ type: "text";
2563
+ }, {
2564
+ value: string;
2565
+ type: "text";
2566
+ }>, z.ZodObject<{
2567
+ type: z.ZodLiteral<"error">;
2568
+ value: z.ZodString;
2569
+ }, "strip", z.ZodTypeAny, {
2570
+ value: string;
2571
+ type: "error";
2572
+ }, {
2573
+ value: string;
2574
+ type: "error";
2575
+ }>, z.ZodObject<{
2576
+ type: z.ZodLiteral<"tool-call">;
2577
+ value: z.ZodObject<{
2578
+ toolCallId: z.ZodString;
2579
+ toolName: z.ZodString;
2580
+ args: z.ZodUnknown;
2581
+ }, "strip", z.ZodTypeAny, {
2582
+ toolName: string;
2583
+ toolCallId: string;
2584
+ args?: unknown;
2585
+ }, {
2586
+ toolName: string;
2587
+ toolCallId: string;
2588
+ args?: unknown;
2589
+ }>;
2590
+ }, "strip", z.ZodTypeAny, {
2591
+ value: {
2592
+ toolName: string;
2593
+ toolCallId: string;
2594
+ args?: unknown;
2595
+ };
2596
+ type: "tool-call";
2597
+ }, {
2598
+ value: {
2599
+ toolName: string;
2600
+ toolCallId: string;
2601
+ args?: unknown;
2602
+ };
2603
+ type: "tool-call";
2604
+ }>, z.ZodObject<{
2605
+ type: z.ZodLiteral<"tool-result">;
2606
+ value: z.ZodObject<{
2607
+ toolCallId: z.ZodString;
2608
+ result: z.ZodUnknown;
2609
+ providerMetadata: z.ZodOptional<z.ZodAny>;
2610
+ }, "strip", z.ZodTypeAny, {
2611
+ toolCallId: string;
2612
+ result?: unknown;
2613
+ providerMetadata?: any;
2614
+ }, {
2615
+ toolCallId: string;
2616
+ result?: unknown;
2617
+ providerMetadata?: any;
2618
+ }>;
2619
+ }, "strip", z.ZodTypeAny, {
2620
+ value: {
2621
+ toolCallId: string;
2622
+ result?: unknown;
2623
+ providerMetadata?: any;
2624
+ };
2625
+ type: "tool-result";
2626
+ }, {
2627
+ value: {
2628
+ toolCallId: string;
2629
+ result?: unknown;
2630
+ providerMetadata?: any;
2631
+ };
2632
+ type: "tool-result";
2633
+ }>, z.ZodObject<{
2634
+ type: z.ZodLiteral<"tool-call-streaming-start">;
2635
+ value: z.ZodObject<{
2636
+ toolCallId: z.ZodString;
2637
+ toolName: z.ZodString;
2638
+ }, "strip", z.ZodTypeAny, {
2639
+ toolName: string;
2640
+ toolCallId: string;
2641
+ }, {
2642
+ toolName: string;
2643
+ toolCallId: string;
2644
+ }>;
2645
+ }, "strip", z.ZodTypeAny, {
2646
+ value: {
2647
+ toolName: string;
2648
+ toolCallId: string;
2649
+ };
2650
+ type: "tool-call-streaming-start";
2651
+ }, {
2652
+ value: {
2653
+ toolName: string;
2654
+ toolCallId: string;
2655
+ };
2656
+ type: "tool-call-streaming-start";
2657
+ }>, z.ZodObject<{
2658
+ type: z.ZodLiteral<"tool-call-delta">;
2659
+ value: z.ZodObject<{
2660
+ toolCallId: z.ZodString;
2661
+ argsTextDelta: z.ZodString;
2662
+ }, "strip", z.ZodTypeAny, {
2663
+ toolCallId: string;
2664
+ argsTextDelta: string;
2665
+ }, {
2666
+ toolCallId: string;
2667
+ argsTextDelta: string;
2668
+ }>;
2669
+ }, "strip", z.ZodTypeAny, {
2670
+ value: {
2671
+ toolCallId: string;
2672
+ argsTextDelta: string;
2673
+ };
2674
+ type: "tool-call-delta";
2675
+ }, {
2676
+ value: {
2677
+ toolCallId: string;
2678
+ argsTextDelta: string;
2679
+ };
2680
+ type: "tool-call-delta";
2681
+ }>, z.ZodObject<{
2682
+ type: z.ZodLiteral<"reasoning">;
2683
+ value: z.ZodObject<{
2684
+ text: z.ZodString;
2685
+ providerMetadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
2686
+ }, "strip", z.ZodTypeAny, {
2687
+ text: string;
2688
+ providerMetadata?: Record<string, any> | undefined;
2689
+ }, {
2690
+ text: string;
2691
+ providerMetadata?: Record<string, any> | undefined;
2692
+ }>;
2693
+ }, "strip", z.ZodTypeAny, {
2694
+ value: {
2695
+ text: string;
2696
+ providerMetadata?: Record<string, any> | undefined;
2697
+ };
2698
+ type: "reasoning";
2699
+ }, {
2700
+ value: {
2701
+ text: string;
2702
+ providerMetadata?: Record<string, any> | undefined;
2703
+ };
2704
+ type: "reasoning";
2705
+ }>, z.ZodObject<{
2706
+ type: z.ZodLiteral<"source">;
2707
+ value: z.ZodObject<{
2708
+ type: z.ZodLiteral<"source">;
2709
+ sourceType: z.ZodLiteral<"url">;
2710
+ id: z.ZodString;
2711
+ url: z.ZodString;
2712
+ title: z.ZodOptional<z.ZodString>;
2713
+ providerMetadata: z.ZodOptional<z.ZodAny>;
2714
+ }, "strip", z.ZodTypeAny, {
2715
+ type: "source";
2716
+ id: string;
2717
+ url: string;
2718
+ sourceType: "url";
2719
+ providerMetadata?: any;
2720
+ title?: string | undefined;
2721
+ }, {
2722
+ type: "source";
2723
+ id: string;
2724
+ url: string;
2725
+ sourceType: "url";
2726
+ providerMetadata?: any;
2727
+ title?: string | undefined;
2728
+ }>;
2729
+ }, "strip", z.ZodTypeAny, {
2730
+ value: {
2731
+ type: "source";
2732
+ id: string;
2733
+ url: string;
2734
+ sourceType: "url";
2735
+ providerMetadata?: any;
2736
+ title?: string | undefined;
2737
+ };
2738
+ type: "source";
2739
+ }, {
2740
+ value: {
2741
+ type: "source";
2742
+ id: string;
2743
+ url: string;
2744
+ sourceType: "url";
2745
+ providerMetadata?: any;
2746
+ title?: string | undefined;
2747
+ };
2748
+ type: "source";
2749
+ }>, z.ZodObject<{
2750
+ type: z.ZodLiteral<"file">;
2751
+ value: z.ZodObject<{
2752
+ url: z.ZodString;
2753
+ mediaType: z.ZodString;
2754
+ }, "strip", z.ZodTypeAny, {
2755
+ mediaType: string;
2756
+ url: string;
2757
+ }, {
2758
+ mediaType: string;
2759
+ url: string;
2760
+ }>;
2761
+ }, "strip", z.ZodTypeAny, {
2762
+ value: {
2763
+ mediaType: string;
2764
+ url: string;
2765
+ };
2766
+ type: "file";
2767
+ }, {
2768
+ value: {
2769
+ mediaType: string;
2770
+ url: string;
2771
+ };
2772
+ type: "file";
2773
+ }>, z.ZodObject<{
2774
+ type: z.ZodLiteral<"metadata">;
2775
+ value: z.ZodObject<{
2776
+ metadata: z.ZodUnknown;
2777
+ }, "strip", z.ZodTypeAny, {
2778
+ metadata?: unknown;
2779
+ }, {
2780
+ metadata?: unknown;
2781
+ }>;
2782
+ }, "strip", z.ZodTypeAny, {
2783
+ value: {
2784
+ metadata?: unknown;
2785
+ };
2786
+ type: "metadata";
2787
+ }, {
2788
+ value: {
2789
+ metadata?: unknown;
2790
+ };
2791
+ type: "metadata";
2792
+ }>, z.ZodObject<{
2793
+ type: z.ZodLiteral<"start-step">;
2794
+ value: z.ZodObject<{
2795
+ metadata: z.ZodUnknown;
2796
+ }, "strip", z.ZodTypeAny, {
2797
+ metadata?: unknown;
2798
+ }, {
2799
+ metadata?: unknown;
2800
+ }>;
2801
+ }, "strip", z.ZodTypeAny, {
2802
+ value: {
2803
+ metadata?: unknown;
2804
+ };
2805
+ type: "start-step";
2806
+ }, {
2807
+ value: {
2808
+ metadata?: unknown;
2809
+ };
2810
+ type: "start-step";
2811
+ }>, z.ZodObject<{
2812
+ type: z.ZodLiteral<"finish-step">;
2813
+ value: z.ZodObject<{
2814
+ metadata: z.ZodUnknown;
2815
+ }, "strip", z.ZodTypeAny, {
2816
+ metadata?: unknown;
2817
+ }, {
2818
+ metadata?: unknown;
2819
+ }>;
2820
+ }, "strip", z.ZodTypeAny, {
2821
+ value: {
2822
+ metadata?: unknown;
2823
+ };
2824
+ type: "finish-step";
2825
+ }, {
2826
+ value: {
2827
+ metadata?: unknown;
2828
+ };
2829
+ type: "finish-step";
2830
+ }>, z.ZodObject<{
2831
+ type: z.ZodLiteral<"start">;
2832
+ value: z.ZodObject<{
2833
+ messageId: z.ZodOptional<z.ZodString>;
2834
+ metadata: z.ZodUnknown;
2835
+ }, "strip", z.ZodTypeAny, {
2836
+ metadata?: unknown;
2837
+ messageId?: string | undefined;
2838
+ }, {
2839
+ metadata?: unknown;
2840
+ messageId?: string | undefined;
2841
+ }>;
2842
+ }, "strip", z.ZodTypeAny, {
2843
+ value: {
2844
+ metadata?: unknown;
2845
+ messageId?: string | undefined;
2846
+ };
2847
+ type: "start";
2848
+ }, {
2849
+ value: {
2850
+ metadata?: unknown;
2851
+ messageId?: string | undefined;
2852
+ };
2853
+ type: "start";
2854
+ }>, z.ZodObject<{
2855
+ type: z.ZodLiteral<"finish">;
2856
+ value: z.ZodObject<{
2857
+ metadata: z.ZodUnknown;
2858
+ }, "strip", z.ZodTypeAny, {
2859
+ metadata?: unknown;
2860
+ }, {
2861
+ metadata?: unknown;
2862
+ }>;
2863
+ }, "strip", z.ZodTypeAny, {
2864
+ value: {
2865
+ metadata?: unknown;
2866
+ };
2867
+ type: "finish";
2868
+ }, {
2869
+ value: {
2870
+ metadata?: unknown;
2871
+ };
2872
+ type: "finish";
2873
+ }>, z.ZodObject<{
2874
+ type: z.ZodLiteral<"reasoning-part-finish">;
2875
+ value: z.ZodNull;
2876
+ }, "strip", z.ZodTypeAny, {
2877
+ value: null;
2878
+ type: "reasoning-part-finish";
2879
+ }, {
2880
+ value: null;
2881
+ type: "reasoning-part-finish";
2882
+ }>]>;
2883
+ type UIMessageStreamPart = z.infer<typeof uiMessageStreamPartSchema>;
2884
+
2885
+ interface UIMessageStreamWriter {
2886
+ /**
2887
+ * Appends a UI message stream part to the stream.
2888
+ */
2889
+ write(part: UIMessageStreamPart): void;
2890
+ /**
2891
+ * Merges the contents of another stream to this stream.
2892
+ */
2893
+ merge(stream: ReadableStream<UIMessageStreamPart>): void;
2894
+ /**
2895
+ * Error handler that is used by the data stream writer.
2896
+ * This is intended for forwarding when merging streams
2897
+ * to prevent duplicated error masking.
2898
+ */
2899
+ onError: ((error: unknown) => string) | undefined;
2900
+ }
2901
+
2902
+ declare function createUIMessageStream({ execute, onError, }: {
2903
+ execute: (writer: UIMessageStreamWriter) => Promise<void> | void;
2904
+ onError?: (error: unknown) => string;
2905
+ }): ReadableStream<UIMessageStreamPart>;
2906
+
2907
+ declare function createUIMessageStreamResponse({ status, statusText, headers, stream, }: ResponseInit & {
2908
+ stream: ReadableStream<UIMessageStreamPart>;
2909
+ }): Response;
2910
+
2911
+ declare function pipeUIMessageStreamToResponse({ response, status, statusText, headers, stream, }: {
2912
+ response: ServerResponse;
2913
+ stream: ReadableStream<UIMessageStreamPart>;
2914
+ } & ResponseInit): void;
2915
+
2916
+ declare class JsonToSseTransformStream extends TransformStream<unknown, string> {
2917
+ constructor();
2918
+ }
3194
2919
 
3195
2920
  /**
3196
2921
  * Converts a data URL of type text/* to a text string.
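The block above introduces the `UIMessageStreamPart` union (including the new `start`, `finish`, `start-step`, `finish-step`, and `metadata` parts, all of which can carry arbitrary metadata), the `UIMessageStreamWriter` interface, and the stream, response, and pipe helpers. A sketch of hand-writing such a stream into a Node.js response, assuming the helpers are exported from the package root (the ids and metadata values are placeholders):

```ts
import { createUIMessageStream, pipeUIMessageStreamToResponse } from 'ai';
import type { ServerResponse } from 'node:http';

export function handle(response: ServerResponse): void {
  const stream = createUIMessageStream({
    execute: writer => {
      // `start` can carry the message id and initial metadata for the UI message.
      writer.write({
        type: 'start',
        value: { messageId: 'msg_1', metadata: { model: 'demo' } },
      });
      writer.write({ type: 'text', value: 'Streaming text as a UI message part.' });
      // `finish` closes the message and can attach final metadata.
      writer.write({ type: 'finish', value: { metadata: { durationMs: 42 } } });
    },
  });

  pipeUIMessageStreamToResponse({ response, stream, status: 200 });
}
```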
@@ -3430,6 +3155,87 @@ declare function embedMany<VALUE>({ model, values, maxParallelCalls, maxRetries:
3430
3155
  maxParallelCalls?: number;
3431
3156
  }): Promise<EmbedManyResult<VALUE>>;
3432
3157
 
3158
+ /**
3159
+ A message that was generated during the generation process.
3160
+ It can be either an assistant message or a tool message.
3161
+ */
3162
+ type ResponseMessage = AssistantModelMessage | ToolModelMessage;
3163
+
3164
+ /**
3165
+ * The result of a single step in the generation process.
3166
+ */
3167
+ type StepResult<TOOLS extends ToolSet> = {
3168
+ /**
3169
+ The content that was generated in the last step.
3170
+ */
3171
+ readonly content: Array<ContentPart<TOOLS>>;
3172
+ /**
3173
+ The generated text.
3174
+ */
3175
+ readonly text: string;
3176
+ /**
3177
+ The reasoning that was generated during the generation.
3178
+ */
3179
+ readonly reasoning: Array<ReasoningPart>;
3180
+ /**
3181
+ The reasoning text that was generated during the generation.
3182
+ */
3183
+ readonly reasoningText: string | undefined;
3184
+ /**
3185
+ The files that were generated during the generation.
3186
+ */
3187
+ readonly files: Array<GeneratedFile>;
3188
+ /**
3189
+ The sources that were used to generate the text.
3190
+ */
3191
+ readonly sources: Array<Source>;
3192
+ /**
3193
+ The tool calls that were made during the generation.
3194
+ */
3195
+ readonly toolCalls: ToolCallArray<TOOLS>;
3196
+ /**
3197
+ The results of the tool calls.
3198
+ */
3199
+ readonly toolResults: ToolResultArray<TOOLS>;
3200
+ /**
3201
+ The reason why the generation finished.
3202
+ */
3203
+ readonly finishReason: FinishReason;
3204
+ /**
3205
+ The token usage of the generated text.
3206
+ */
3207
+ readonly usage: LanguageModelUsage;
3208
+ /**
3209
+ Warnings from the model provider (e.g. unsupported settings).
3210
+ */
3211
+ readonly warnings: CallWarning[] | undefined;
3212
+ /**
3213
+ Additional request information.
3214
+ */
3215
+ readonly request: LanguageModelRequestMetadata;
3216
+ /**
3217
+ Additional response information.
3218
+ */
3219
+ readonly response: LanguageModelResponseMetadata & {
3220
+ /**
3221
+ The response messages that were generated during the call.
3222
+ Response messages can be either assistant messages or tool messages.
3223
+ They contain a generated id.
3224
+ */
3225
+ readonly messages: Array<ResponseMessage>;
3226
+ /**
3227
+ Response body (available only for providers that use HTTP requests).
3228
+ */
3229
+ body?: unknown;
3230
+ };
3231
+ /**
3232
+ Additional provider-specific metadata. They are passed through
3233
+ from the provider to the AI SDK and enable provider-specific
3234
+ results that can be fully encapsulated in the provider.
3235
+ */
3236
+ readonly providerMetadata: ProviderMetadata | undefined;
3237
+ };
3238
+
3433
3239
  /**
3434
3240
  The result of a `generateText` call.
3435
3241
  It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
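`ResponseMessage` and `StepResult` are re-added here in simplified form: `ResponseMessage` no longer carries a generated `id`, and `StepResult` loses the old `stepType` and `isContinued` fields. A small sketch of inspecting a step through the `onStepFinish` callback of `generateText`, assuming `StepResult` and `ToolSet` stay exported (the logging is illustrative):

```ts
import type { StepResult, ToolSet } from 'ai';

// Logs the per-step accounting exposed by the simplified StepResult type.
export function logStep(step: StepResult<ToolSet>): void {
  console.log({
    finishReason: step.finishReason,
    tokens: step.usage.totalTokens,
    toolCalls: step.toolCalls.length,
    toolResults: step.toolResults.length,
    // `stepType` and `isContinued` no longer exist on StepResult.
  });
}

// Typical wiring (model setup omitted):
// await generateText({ model, prompt: 'Hi', onStepFinish: logStep });
```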
@@ -3440,15 +3246,15 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
3440
3246
  */
3441
3247
  readonly content: Array<ContentPart<TOOLS>>;
3442
3248
  /**
3443
- The generated text. If you are using continue steps, this can include text from all steps.
3249
+ The text that was generated in the last step.
3444
3250
  */
3445
3251
  readonly text: string;
3446
3252
  /**
3447
- The full reasoning that the model has generated.
3253
+ The full reasoning that the model has generated in the last step.
3448
3254
  */
3449
3255
  readonly reasoning: Array<ReasoningPart>;
3450
3256
  /**
3451
- The reasoning text that the model has generated. Can be undefined if the model
3257
+ The reasoning text that the model has generated in the last step. Can be undefined if the model
3452
3258
  has only generated text.
3453
3259
  */
3454
3260
  readonly reasoningText: string | undefined;
@@ -3458,28 +3264,32 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
3458
3264
  */
3459
3265
  readonly files: Array<GeneratedFile>;
3460
3266
  /**
3461
- Sources that have been used as input to generate the response.
3462
- For multi-step generation, the sources are accumulated from all steps.
3267
+ Sources that have been used as references in the last step.
3463
3268
  */
3464
3269
  readonly sources: Array<Source>;
3465
3270
  /**
3466
- The tool calls that were made during the generation.
3271
+ The tool calls that were made in the last step.
3467
3272
  */
3468
3273
  readonly toolCalls: ToolCallArray<TOOLS>;
3469
3274
  /**
3470
- The results of the tool calls.
3275
+ The results of the tool calls from the last step.
3471
3276
  */
3472
3277
  readonly toolResults: ToolResultArray<TOOLS>;
3473
3278
  /**
3474
- The reason why the generation finished.
3279
+ The reason why the generation finished.
3475
3280
  */
3476
3281
  readonly finishReason: FinishReason;
3477
3282
  /**
3478
- The token usage of the generated text.
3283
+ The token usage of the last step.
3479
3284
  */
3480
3285
  readonly usage: LanguageModelUsage;
3481
3286
  /**
3482
- Warnings from the model provider (e.g. unsupported settings)
3287
+ The total token usage of all steps.
3288
+ When there are multiple steps, the usage is the sum of all step usages.
3289
+ */
3290
+ readonly totalUsage: LanguageModelUsage;
3291
+ /**
3292
+ Warnings from the model provider (e.g. unsupported settings)
3483
3293
  */
3484
3294
  readonly warnings: CallWarning[] | undefined;
3485
3295
  /**
@@ -3606,7 +3416,7 @@ If set and supported by the model, calls will generate deterministic results.
3606
3416
  @returns
3607
3417
  A result object that contains the generated text, the results of the tool calls, and additional information.
3608
3418
  */
3609
- declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, providerOptions, experimental_activeTools: activeTools, experimental_prepareStep: prepareStep, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
3419
+ declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, maxSteps, experimental_output: output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools: activeTools, experimental_prepareStep: prepareStep, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
3610
3420
  /**
3611
3421
  The language model to use.
3612
3422
  */
@@ -3628,16 +3438,6 @@ By default, it's set to 1, which means that only a single LLM call is made.
3628
3438
  */
3629
3439
  maxSteps?: number;
3630
3440
  /**
3631
- Generate a unique ID for each message.
3632
- */
3633
- experimental_generateMessageId?: IdGenerator;
3634
- /**
3635
- When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
3636
-
3637
- By default, it's set to false.
3638
- */
3639
- experimental_continueSteps?: boolean;
3640
- /**
3641
3441
  Optional telemetry configuration (experimental).
3642
3442
  */
3643
3443
  experimental_telemetry?: TelemetrySettings;
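On `GenerateTextResult`, `text`, `reasoning`, `sources`, `toolCalls`, `toolResults`, and `usage` now describe the last step only, a new `totalUsage` aggregates token usage across all steps, and the `experimental_generateMessageId` and `experimental_continueSteps` options are removed from `generateText`. A hedged sketch of reading both usage values (`@ai-sdk/openai` is a placeholder provider; any compatible `LanguageModelV2` works):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai'; // placeholder provider for the sketch

const result = await generateText({
  model: openai('gpt-4o-mini'),
  prompt: 'Summarize the release notes in one sentence.',
  maxSteps: 3,
  // experimental_generateMessageId and experimental_continueSteps no longer
  // exist as options in this version.
});

// Per-step vs. aggregate accounting: `usage` covers the last step only,
// while the new `totalUsage` sums usage across all steps.
console.log('last step tokens:', result.usage.totalTokens);
console.log('all steps tokens:', result.totalUsage.totalTokens);
console.log('last step text:', result.text);
```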
@@ -3697,18 +3497,44 @@ A function that attempts to repair a tool call that failed to parse.
3697
3497
 
3698
3498
  type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
3699
3499
 
3700
- type DataStreamOptions = {
3500
+ type UIMessageStreamOptions = {
3701
3501
  /**
3702
- * Process an error, e.g. to log it. Default to `() => 'An error occurred.'`.
3703
- *
3704
- * @return error message to include in the data stream.
3502
+ * Message ID that is sent to the client if a new message is created.
3503
+ * This is intended to be used for the UI message,
3504
+ * if the last original message is not an assistant message
3505
+ * (in which case that message ID is used).
3705
3506
  */
3706
- onError?: (error: unknown) => string;
3507
+ newMessageId?: string;
3707
3508
  /**
3708
- * Send usage parts to the client.
3709
- * Default to true.
3509
+ * The original messages.
3510
+ */
3511
+ originalMessages?: UIMessage[];
3512
+ onFinish?: (options: {
3513
+ /**
3514
+ * The updated list of UI messages.
3515
+ */
3516
+ messages: UIMessage[];
3517
+ /**
3518
+ * Indicates whether the response message is a continuation of the last original message,
3519
+ * or if a new message was created.
3520
+ */
3521
+ isContinuation: boolean;
3522
+ /**
3523
+ * The message that was sent to the client as a response
3524
+ * (including the original message if it was extended).
3525
+ */
3526
+ responseMessage: UIMessage;
3527
+ }) => void;
3528
+ /**
3529
+ * Extracts message metadata that will be sent to the client.
3530
+ *
3531
+ * Called on `start`, `finish`, `start-step`, and `finish-step` events.
3710
3532
  */
3711
- sendUsage?: boolean;
3533
+ messageMetadata?: (options: {
3534
+ part: TextStreamPart<ToolSet> & {
3535
+ type: 'start' | 'finish' | 'start-step' | 'finish-step';
3536
+ };
3537
+ }) => unknown;
3712
3538
  /**
3713
3539
  * Send reasoning parts to the client.
3714
3540
  * Default to false.
@@ -3738,6 +3564,12 @@ type DataStreamOptions = {
3738
3564
  * the message start event from being sent multiple times.
3739
3565
  */
3740
3566
  experimental_sendStart?: boolean;
3567
+ /**
3568
+ * Process an error, e.g. to log it. Default to `() => 'An error occurred.'`.
3569
+ *
3570
+ * @return error message to include in the data stream.
3571
+ */
3572
+ onError?: (error: unknown) => string;
3741
3573
  };
3742
3574
  type ConsumeStreamOptions = {
3743
3575
  onError?: (error: unknown) => void;
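`DataStreamOptions` is replaced by `UIMessageStreamOptions`, which adds `newMessageId`, `originalMessages`, an `onFinish` callback that yields the updated UI message list, and a `messageMetadata` extractor called on `start`/`finish` (and the step variants), while `sendUsage` disappears and `onError` moves to the end of the type. A sketch against the renamed `toUIMessageStream` method shown further below, assuming an existing `streamText` result (the `saveChat` persistence function is a placeholder, not an SDK API):

```ts
import type { StreamTextResult, ToolSet, UIMessage } from 'ai';

// Illustrative persistence hook; saveChat is a placeholder, not an SDK API.
declare function saveChat(messages: UIMessage[]): Promise<void>;

export function toStream(
  result: StreamTextResult<ToolSet, never>,
  originalMessages: UIMessage[],
) {
  return result.toUIMessageStream({
    originalMessages,
    // Called on start/finish (and the step variants); the returned value
    // becomes the message metadata sent to the client.
    messageMetadata: ({ part }) =>
      part.type === 'finish' ? { finishedAt: Date.now() } : undefined,
    onFinish: ({ messages, responseMessage, isContinuation }) => {
      // `messages` is the updated UI message list, ready to persist.
      void saveChat(messages);
      console.log('continued existing message?', isContinuation, responseMessage.id);
    },
    onError: error => (error instanceof Error ? error.message : 'An error occurred.'),
  });
}
```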
@@ -3777,8 +3609,7 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
3777
3609
  */
3778
3610
  readonly files: Promise<GeneratedFile[]>;
3779
3611
  /**
3780
- Sources that have been used as input to generate the response.
3781
- For multi-step generation, the sources are accumulated from all steps.
3612
+ Sources that have been used as references in the last step.
3782
3613
 
3783
3614
  Resolved when the response is finished.
3784
3615
  */
@@ -3802,12 +3633,18 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
3802
3633
  */
3803
3634
  readonly finishReason: Promise<FinishReason>;
3804
3635
  /**
3636
+ The token usage of the last step.
3637
+
3638
+ Resolved when the response is finished.
3639
+ */
3640
+ readonly usage: Promise<LanguageModelUsage>;
3641
+ /**
3805
3642
  The total token usage of the generated response.
3806
3643
  When there are multiple steps, the usage is the sum of all step usages.
3807
3644
 
3808
3645
  Resolved when the response is finished.
3809
3646
  */
3810
- readonly usage: Promise<LanguageModelUsage>;
3647
+ readonly totalUsage: Promise<LanguageModelUsage>;
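As a quick illustration of the split (usage for the last step only vs. totalUsage summed over all steps, and sources now limited to the last step), a sketch in which only the provider and model id are assumptions:

import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

async function main() {
  const result = streamText({
    model: openai('gpt-4o'), // illustrative
    prompt: 'Name three uses of WebAssembly.',
    maxSteps: 3,
  });

  await result.consumeStream();

  console.log(await result.usage);      // token usage of the last step only
  console.log(await result.totalUsage); // summed usage across all steps
  console.log(await result.sources);    // sources referenced in the last step
}

main().catch(console.error);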
3811
3648
  /**
3812
3649
  Warnings from the model provider (e.g. unsupported settings) for the first step.
3813
3650
  */
@@ -3869,7 +3706,7 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
3869
3706
  */
3870
3707
  consumeStream(options?: ConsumeStreamOptions): Promise<void>;
3871
3708
  /**
3872
- Converts the result to a data stream.
3709
+ Converts the result to a UI message stream.
3873
3710
 
3874
3711
  @param options.getErrorMessage an optional function that converts an error to an error message.
3875
3712
  @param options.sendUsage whether to send the usage information to the client. Defaults to true.
@@ -3878,11 +3715,11 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
3878
3715
  @param options.experimental_sendFinish whether to send the finish information to the client. Defaults to true.
3879
3716
  @param options.experimental_sendStart whether to send the start information to the client. Defaults to true.
3880
3717
 
3881
- @return A data stream.
3718
+ @return A UI message stream.
3882
3719
  */
3883
- toDataStream(options?: DataStreamOptions): ReadableStream<DataStreamPart>;
3720
+ toUIMessageStream(options?: UIMessageStreamOptions): ReadableStream<UIMessageStreamPart>;
3884
3721
  /**
3885
- Writes data stream output to a Node.js response-like object.
3722
+ Writes UI message stream output to a Node.js response-like object.
3886
3723
  @param response A Node.js response-like object (ServerResponse).
3887
3724
  @param options.status The status code.
3888
3725
  @param options.statusText The status text.
@@ -3891,7 +3728,7 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
3891
3728
  @param options.sendUsage Whether to send the usage information to the client. Defaults to true.
3892
3729
  @param options.sendReasoning Whether to send the reasoning information to the client. Defaults to false.
3893
3730
  */
3894
- pipeDataStreamToResponse(response: ServerResponse, options?: ResponseInit & DataStreamOptions): void;
3731
+ pipeUIMessageStreamToResponse(response: ServerResponse, options?: ResponseInit & UIMessageStreamOptions): void;
3895
3732
  /**
3896
3733
  Writes text delta output to a Node.js response-like object.
3897
3734
  It sets a `Content-Type` header to `text/plain; charset=utf-8` and
@@ -3902,17 +3739,16 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
3902
3739
  pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit): void;
3903
3740
  /**
3904
3741
  Converts the result to a streamed response object with a stream data part stream.
3905
- It can be used with the `useChat` and `useCompletion` hooks.
3742
+
3906
3743
  @param options.status The status code.
3907
3744
  @param options.statusText The status text.
3908
3745
  @param options.headers The headers.
3909
- @param options.data The stream data.
3910
3746
  @param options.getErrorMessage An optional function that converts an error to an error message.
3911
3747
  @param options.sendUsage Whether to send the usage information to the client. Defaults to true.
3912
3748
  @param options.sendReasoning Whether to send the reasoning information to the client. Defaults to false.
3913
3749
  @return A response object.
3914
3750
  */
3915
- toDataStreamResponse(options?: ResponseInit & DataStreamOptions): Response;
3751
+ toUIMessageStreamResponse(options?: ResponseInit & UIMessageStreamOptions): Response;
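A sketch of the renamed Node.js path, pipeUIMessageStreamToResponse, combined with the onError option from UIMessageStreamOptions; the http server setup and model id are illustrative only:

import { createServer } from 'node:http';
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

createServer((_req, res) => {
  const result = streamText({
    model: openai('gpt-4o'), // illustrative
    prompt: 'Write a haiku about version diffs.',
  });

  // Replaces the former pipeDataStreamToResponse(response, options) call.
  result.pipeUIMessageStreamToResponse(res, {
    onError: (error) => {
      console.error(error);
      return 'An error occurred.'; // message embedded in the stream for the client
    },
  });
}).listen(3000);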
3916
3752
  /**
3917
3753
  Creates a simple text stream response.
3918
3754
  Each text delta is encoded as UTF-8 and sent as a separate chunk.
@@ -3933,29 +3769,21 @@ type TextStreamPart<TOOLS extends ToolSet> = ContentPart<TOOLS> | {
3933
3769
  toolName: string;
3934
3770
  argsTextDelta: string;
3935
3771
  } | {
3936
- type: 'step-start';
3937
- messageId: string;
3772
+ type: 'start-step';
3938
3773
  request: LanguageModelRequestMetadata;
3939
3774
  warnings: CallWarning[];
3940
3775
  } | {
3941
- type: 'step-finish';
3942
- messageId: string;
3943
- request: LanguageModelRequestMetadata;
3944
- warnings: CallWarning[] | undefined;
3776
+ type: 'finish-step';
3945
3777
  response: LanguageModelResponseMetadata;
3946
3778
  usage: LanguageModelUsage;
3947
3779
  finishReason: FinishReason;
3948
3780
  providerMetadata: ProviderMetadata | undefined;
3949
- isContinued: boolean;
3781
+ } | {
3782
+ type: 'start';
3950
3783
  } | {
3951
3784
  type: 'finish';
3952
3785
  finishReason: FinishReason;
3953
- usage: LanguageModelUsage;
3954
- providerMetadata: ProviderMetadata | undefined;
3955
- /**
3956
- * @deprecated use response on step-finish instead
3957
- */
3958
- response: LanguageModelResponseMetadata;
3786
+ totalUsage: LanguageModelUsage;
3959
3787
  } | {
3960
3788
  type: 'error';
3961
3789
  error: unknown;
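To show the renamed stream parts in context ('start-step'/'finish-step' replace 'step-start'/'step-finish', and 'finish' now carries totalUsage rather than per-call usage), a sketch that assumes result.fullStream behaves as in earlier releases, with an illustrative provider:

import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

async function main() {
  const result = streamText({
    model: openai('gpt-4o'), // illustrative
    prompt: 'Explain tail-call optimization in one paragraph.',
  });

  for await (const part of result.fullStream) {
    switch (part.type) {
      case 'start-step':
        console.log('step started, warnings:', part.warnings);
        break;
      case 'finish-step':
        console.log('step finished:', part.finishReason, part.usage);
        break;
      case 'finish':
        console.log('total usage:', part.totalUsage);
        break;
    }
  }
}

main().catch(console.error);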
@@ -4029,11 +3857,15 @@ Callback that is set using the `onFinish` option.
4029
3857
 
4030
3858
  @param event - The event that is passed to the callback.
4031
3859
  */
4032
- type StreamTextOnFinishCallback<TOOLS extends ToolSet> = (event: Omit<StepResult<TOOLS>, 'stepType' | 'isContinued'> & {
3860
+ type StreamTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
4033
3861
  /**
4034
3862
  Details for all steps.
4035
3863
  */
4036
3864
  readonly steps: StepResult<TOOLS>[];
3865
+ /**
3866
+ Total usage for all steps. This is the sum of the usage of all steps.
3867
+ */
3868
+ readonly totalUsage: LanguageModelUsage;
4037
3869
  }) => Promise<void> | void;
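A sketch of the extended onFinish event, which now receives a full StepResult plus the steps array and the new totalUsage field; the provider, model id, and the assumption that a StepResult exposes finishReason are illustrative:

import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

async function main() {
  const result = streamText({
    model: openai('gpt-4o'), // illustrative
    prompt: 'List three uses of a UI message stream.',
    onFinish({ finishReason, steps, totalUsage }) {
      console.log(finishReason, steps.length, totalUsage);
    },
  });

  await result.consumeStream();
}

main().catch(console.error);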
4038
3870
  /**
4039
3871
  Generate a text and call tools for a given prompt using a language model.
@@ -4073,7 +3905,6 @@ If set and supported by the model, calls will generate deterministic results.
4073
3905
  @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
4074
3906
 
4075
3907
  @param maxSteps - Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
4076
- @param experimental_generateMessageId - Generate a unique ID for each message.
4077
3908
 
4078
3909
  @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
4079
3910
  @param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
@@ -4084,7 +3915,7 @@ If set and supported by the model, calls will generate deterministic results.
4084
3915
  @return
4085
3916
  A result object for accessing different stream types and additional information.
4086
3917
  */
4087
- declare function streamText<TOOLS extends ToolSet, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, providerOptions, experimental_toolCallStreaming, toolCallStreaming, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, onChunk, onError, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
3918
+ declare function streamText<TOOLS extends ToolSet, OUTPUT = never, PARTIAL_OUTPUT = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxSteps, experimental_output: output, experimental_telemetry: telemetry, providerOptions, experimental_toolCallStreaming, toolCallStreaming, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, onChunk, onError, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
4088
3919
  /**
4089
3920
  The language model to use.
4090
3921
  */
@@ -4106,16 +3937,6 @@ By default, it's set to 1, which means that only a single LLM call is made.
4106
3937
  */
4107
3938
  maxSteps?: number;
4108
3939
  /**
4109
- Generate a unique ID for each message.
4110
- */
4111
- experimental_generateMessageId?: IdGenerator;
4112
- /**
4113
- When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
4114
-
4115
- By default, it's set to false.
4116
- */
4117
- experimental_continueSteps?: boolean;
4118
- /**
4119
3940
  Optional telemetry configuration (experimental).
4120
3941
  */
4121
3942
  experimental_telemetry?: TelemetrySettings;
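Since experimental_generateMessageId and experimental_continueSteps are gone from streamText in this version, message IDs are no longer generated at this level; one option, sketched below with an illustrative provider, is to supply an ID via UIMessageStreamOptions.newMessageId when building the response:

import { openai } from '@ai-sdk/openai';
import { generateId, streamText } from 'ai';

const result = streamText({
  model: openai('gpt-4o'), // illustrative
  prompt: 'Suggest a name for a CLI diff viewer.',
});

const response = result.toUIMessageStreamResponse({
  newMessageId: generateId(), // used when the response starts a new UI message
});

console.log(response.status);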
@@ -4229,7 +4050,7 @@ as body parameters.
4229
4050
 
4230
4051
  @returns A result object that contains the generated images.
4231
4052
  */
4232
- declare function generateImage({ model, prompt, n, size, aspectRatio, seed, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
4053
+ declare function generateImage({ model, prompt, n, maxImagesPerCall, size, aspectRatio, seed, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
4233
4054
  /**
4234
4055
  The image model to use.
4235
4056
  */
@@ -4243,6 +4064,10 @@ declare function generateImage({ model, prompt, n, size, aspectRatio, seed, prov
4243
4064
  */
4244
4065
  n?: number;
4245
4066
  /**
4067
+ Maximum number of images to generate per API call.
4068
+ */
4069
+ maxImagesPerCall?: number;
4070
+ /**
4246
4071
  Size of the images to generate. Must have the format `{width}x{height}`. If not provided, the default size will be used.
4247
4072
  */
4248
4073
  size?: `${number}x${number}`;
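A sketch of the new maxImagesPerCall option on experimental_generateImage, which caps how many images are requested in a single provider call; the provider-specific image model id is an assumption:

import { openai } from '@ai-sdk/openai';
import { experimental_generateImage as generateImage } from 'ai';

async function main() {
  const { images } = await generateImage({
    model: openai.image('dall-e-2'), // assumption: provider image model
    prompt: 'A watercolor fox reading a changelog',
    n: 4,
    maxImagesPerCall: 2, // request the 4 images in batches of at most 2
    size: '1024x1024',
  });

  console.log(images.length);
}

main().catch(console.error);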
@@ -4347,6 +4172,62 @@ Generate a structured, typed object for a given prompt and schema using a langua
4347
4172
 
4348
4173
  This function does not stream the output. If you want to stream the output, use `streamObject` instead.
4349
4174
 
4175
+ @param model - The language model to use.
4176
+ @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
4177
+
4178
+ @param system - A system message that will be part of the prompt.
4179
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
4180
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
4181
+
4182
+ @param maxOutputTokens - Maximum number of tokens to generate.
4183
+ @param temperature - Temperature setting.
4184
+ The value is passed through to the provider. The range depends on the provider and model.
4185
+ It is recommended to set either `temperature` or `topP`, but not both.
4186
+ @param topP - Nucleus sampling.
4187
+ The value is passed through to the provider. The range depends on the provider and model.
4188
+ It is recommended to set either `temperature` or `topP`, but not both.
4189
+ @param topK - Only sample from the top K options for each subsequent token.
4190
+ Used to remove "long tail" low probability responses.
4191
+ Recommended for advanced use cases only. You usually only need to use temperature.
4192
+ @param presencePenalty - Presence penalty setting.
4193
+ It affects the likelihood of the model to repeat information that is already in the prompt.
4194
+ The value is passed through to the provider. The range depends on the provider and model.
4195
+ @param frequencyPenalty - Frequency penalty setting.
4196
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
4197
+ The value is passed through to the provider. The range depends on the provider and model.
4198
+ @param stopSequences - Stop sequences.
4199
+ If set, the model will stop generating text when one of the stop sequences is generated.
4200
+ @param seed - The seed (integer) to use for random sampling.
4201
+ If set and supported by the model, calls will generate deterministic results.
4202
+
4203
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
4204
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
4205
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
4206
+
4207
+ @param schema - The schema of the object that the model should generate.
4208
+ @param schemaName - Optional name of the output that should be generated.
4209
+ Used by some providers for additional LLM guidance, e.g.
4210
+ via tool or schema name.
4211
+ @param schemaDescription - Optional description of the output that should be generated.
4212
+ Used by some providers for additional LLM guidance, e.g.
4213
+ via tool or schema description.
4214
+
4215
+ @param output - The type of the output.
4216
+
4217
+ - 'object': The output is an object.
4218
+ - 'array': The output is an array.
4219
+ - 'enum': The output is an enum.
4220
+ - 'no-schema': The output is not a schema.
4221
+
4222
+ @param experimental_repairText - A function that attempts to repair the raw output of the model
4223
+ to enable JSON parsing.
4224
+
4225
+ @param experimental_telemetry - Optional telemetry configuration (experimental).
4226
+
4227
+ @param providerOptions - Additional provider-specific options. They are passed through
4228
+ to the provider from the AI SDK and enable provider-specific
4229
+ functionality that can be fully encapsulated in the provider.
4230
+
4350
4231
  @returns
4351
4232
  A result object that contains the generated object, the finish reason, the token usage, and additional information.
4352
4233
  */
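To ground the parameter list above, a minimal generateObject call using a Zod schema with the documented schemaName/schemaDescription hints; provider and model id are illustrative:

import { openai } from '@ai-sdk/openai';
import { generateObject } from 'ai';
import { z } from 'zod';

async function main() {
  const { object, usage } = await generateObject({
    model: openai('gpt-4o'), // illustrative
    schema: z.object({
      name: z.string(),
      ingredients: z.array(z.string()),
    }),
    schemaName: 'Recipe',
    schemaDescription: 'A recipe with a name and a list of ingredients.',
    prompt: 'Generate a simple lasagna recipe.',
  });

  console.log(object.name, usage);
}

main().catch(console.error);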
@@ -4548,8 +4429,63 @@ type StreamObjectOnFinishCallback<RESULT> = (event: {
4548
4429
  }) => Promise<void> | void;
4549
4430
  /**
4550
4431
  Generate a structured, typed object for a given prompt and schema using a language model.
4432
+
4551
4433
  This function streams the output. If you do not want to stream the output, use `generateObject` instead.
4552
- @return
4434
+
4435
+ @param model - The language model to use.
4436
+ @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
4437
+
4438
+ @param system - A system message that will be part of the prompt.
4439
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
4440
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
4441
+
4442
+ @param maxOutputTokens - Maximum number of tokens to generate.
4443
+ @param temperature - Temperature setting.
4444
+ The value is passed through to the provider. The range depends on the provider and model.
4445
+ It is recommended to set either `temperature` or `topP`, but not both.
4446
+ @param topP - Nucleus sampling.
4447
+ The value is passed through to the provider. The range depends on the provider and model.
4448
+ It is recommended to set either `temperature` or `topP`, but not both.
4449
+ @param topK - Only sample from the top K options for each subsequent token.
4450
+ Used to remove "long tail" low probability responses.
4451
+ Recommended for advanced use cases only. You usually only need to use temperature.
4452
+ @param presencePenalty - Presence penalty setting.
4453
+ It affects the likelihood of the model to repeat information that is already in the prompt.
4454
+ The value is passed through to the provider. The range depends on the provider and model.
4455
+ @param frequencyPenalty - Frequency penalty setting.
4456
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
4457
+ The value is passed through to the provider. The range depends on the provider and model.
4458
+ @param stopSequences - Stop sequences.
4459
+ If set, the model will stop generating text when one of the stop sequences is generated.
4460
+ @param seed - The seed (integer) to use for random sampling.
4461
+ If set and supported by the model, calls will generate deterministic results.
4462
+
4463
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
4464
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
4465
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
4466
+
4467
+ @param schema - The schema of the object that the model should generate.
4468
+ @param schemaName - Optional name of the output that should be generated.
4469
+ Used by some providers for additional LLM guidance, e.g.
4470
+ via tool or schema name.
4471
+ @param schemaDescription - Optional description of the output that should be generated.
4472
+ Used by some providers for additional LLM guidance, e.g.
4473
+ via tool or schema description.
4474
+
4475
+ @param output - The type of the output.
4476
+
4477
+ - 'object': The output is an object.
4478
+ - 'array': The output is an array.
4479
+ - 'enum': The output is an enum.
4480
+ - 'no-schema': The output is not a schema.
4481
+
4482
+ @param experimental_telemetry - Optional telemetry configuration (experimental).
4483
+
4484
+ @param providerOptions - Additional provider-specific options. They are passed through
4485
+ to the provider from the AI SDK and enable provider-specific
4486
+ functionality that can be fully encapsulated in the provider.
4487
+
4488
+ @returns
4553
4489
  A result object for accessing the partial object stream and additional information.
4554
4490
  */
4555
4491
  declare function streamObject<RESULT extends SCHEMA extends z.Schema ? Output extends 'array' ? Array<z.infer<SCHEMA>> : z.infer<SCHEMA> : SCHEMA extends Schema<infer T> ? Output extends 'array' ? Array<T> : T : never, SCHEMA extends z.Schema | Schema = z.Schema<JSONValue$1>, Output extends 'object' | 'array' | 'enum' | 'no-schema' = RESULT extends string ? 'enum' : 'object'>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (Output extends 'enum' ? {
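And the streaming counterpart: a sketch that assumes partialObjectStream behaves as in earlier releases, with an illustrative provider and model id:

import { openai } from '@ai-sdk/openai';
import { streamObject } from 'ai';
import { z } from 'zod';

async function main() {
  const { partialObjectStream } = streamObject({
    model: openai('gpt-4o'), // illustrative
    schema: z.object({
      title: z.string(),
      bullets: z.array(z.string()),
    }),
    prompt: 'Outline a short post about UI message streams.',
  });

  for await (const partial of partialObjectStream) {
    console.log(partial); // progressively more complete object
  }
}

main().catch(console.error);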
@@ -4736,17 +4672,17 @@ declare function generateSpeech({ model, text, voice, outputFormat, instructions
4736
4672
  */
4737
4673
  declare function defaultSettingsMiddleware({ settings, }: {
4738
4674
  settings: Partial<{
4739
- maxOutputTokens?: LanguageModelV2CallOptions['maxOutputTokens'] | null;
4740
- temperature?: LanguageModelV2CallOptions['temperature'] | null;
4741
- stopSequences?: LanguageModelV2CallOptions['stopSequences'] | null;
4742
- topP?: LanguageModelV2CallOptions['topP'] | null;
4743
- topK?: LanguageModelV2CallOptions['topK'] | null;
4744
- presencePenalty?: LanguageModelV2CallOptions['presencePenalty'] | null;
4745
- frequencyPenalty?: LanguageModelV2CallOptions['frequencyPenalty'] | null;
4746
- responseFormat?: LanguageModelV2CallOptions['responseFormat'] | null;
4747
- seed?: LanguageModelV2CallOptions['seed'] | null;
4748
- tools?: LanguageModelV2CallOptions['tools'] | null;
4749
- toolChoice?: LanguageModelV2CallOptions['toolChoice'] | null;
4675
+ maxOutputTokens?: LanguageModelV2CallOptions['maxOutputTokens'];
4676
+ temperature?: LanguageModelV2CallOptions['temperature'];
4677
+ stopSequences?: LanguageModelV2CallOptions['stopSequences'];
4678
+ topP?: LanguageModelV2CallOptions['topP'];
4679
+ topK?: LanguageModelV2CallOptions['topK'];
4680
+ presencePenalty?: LanguageModelV2CallOptions['presencePenalty'];
4681
+ frequencyPenalty?: LanguageModelV2CallOptions['frequencyPenalty'];
4682
+ responseFormat?: LanguageModelV2CallOptions['responseFormat'];
4683
+ seed?: LanguageModelV2CallOptions['seed'];
4684
+ tools?: LanguageModelV2CallOptions['tools'];
4685
+ toolChoice?: LanguageModelV2CallOptions['toolChoice'];
4750
4686
  headers?: LanguageModelV2CallOptions['headers'];
4751
4687
  providerOptions?: LanguageModelV2CallOptions['providerOptions'];
4752
4688
  }>;
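A sketch of the tightened defaultSettingsMiddleware settings, which now take plain LanguageModelV2CallOptions values (the `| null` variants were dropped); the wrapLanguageModel usage and the provider/model id are assumptions carried over from earlier releases:

import { openai } from '@ai-sdk/openai';
import { defaultSettingsMiddleware, wrapLanguageModel } from 'ai';

const model = wrapLanguageModel({
  model: openai('gpt-4o'), // illustrative
  middleware: defaultSettingsMiddleware({
    settings: {
      temperature: 0.2,
      maxOutputTokens: 1024,
      // null values (e.g. `seed: null`) are no longer accepted in this version
    },
  }),
});

console.log(model.modelId);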
@@ -4958,4 +4894,4 @@ declare function transcribe({ model, audio, providerOptions, maxRetries: maxRetr
4958
4894
  headers?: Record<string, string>;
4959
4895
  }): Promise<TranscriptionResult>;
4960
4896
 
4961
- export { AssistantContent, AssistantModelMessage, CallSettings, CallWarning, ChatRequestOptions, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MCPClientError, MCPTransport, MessageConversionError, ModelMessage, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RetryError, SourceUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, SystemModelMessage, TelemetrySettings, TextPart, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolModelMessage, ToolResultPart, ToolResultUnion, ToolSet, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIMessage, UIMessagePart, UseChatOptions, UseCompletionOptions, UserContent, UserModelMessage, appendClientMessage, appendResponseMessages, assistantModelMessageSchema, callChatApi, callCompletionApi, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, createTextStreamResponse, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractMaxToolInvocationStep, extractReasoningMiddleware, generateObject, generateText, getTextFromDataUrl, getToolInvocations, isAssistantMessageWithCompletedToolCalls, isDeepEqualData, modelMessageSchema, parsePartialJson, pipeDataStreamToResponse, pipeTextStreamToResponse, processDataStream, processTextStream, shouldResubmitMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, systemModelMessageSchema, 
tool, toolModelMessageSchema, updateToolCallResult, userModelMessageSchema, wrapLanguageModel };
4897
+ export { AssistantContent, AssistantModelMessage, CallSettings, CallWarning, ChatRequestOptions, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataContent, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MCPClientError, MCPTransport, MessageConversionError, ModelMessage, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RetryError, SourceUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, SystemModelMessage, TelemetrySettings, TextPart, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolModelMessage, ToolResultPart, ToolResultUnion, ToolSet, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIMessage, UIMessagePart, UIMessageStreamOptions, UIMessageStreamPart, UIMessageStreamWriter, UseChatOptions, UseCompletionOptions, UserContent, UserModelMessage, appendClientMessage, assistantModelMessageSchema, callChatApi, callCompletionApi, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractMaxToolInvocationStep, extractReasoningMiddleware, generateObject, generateText, getTextFromDataUrl, getToolInvocations, isAssistantMessageWithCompletedToolCalls, isDeepEqualData, modelMessageSchema, parsePartialJson, pipeTextStreamToResponse, pipeUIMessageStreamToResponse, processTextStream, shouldResubmitMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, systemModelMessageSchema, tool, 
toolModelMessageSchema, updateToolCallResult, userModelMessageSchema, wrapLanguageModel };